diff --git a/.github/workflows/rust-ci-full.yml b/.github/workflows/rust-ci-full.yml index 146e57524c..97fa33283e 100644 --- a/.github/workflows/rust-ci-full.yml +++ b/.github/workflows/rust-ci-full.yml @@ -3,6 +3,7 @@ on: push: branches: - main + - "**full-ci**" workflow_dispatch: # CI builds in debug (dev) for faster signal. diff --git a/AGENTS.md b/AGENTS.md index c8d989fe9d..297fb0fe15 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -21,6 +21,7 @@ In the codex-rs folder where the rust code lives: - Newly added traits should include doc comments that explain their role and how implementations are expected to use them. - When writing tests, prefer comparing the equality of entire objects over fields one by one. - When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable. +- Prefer private modules and explicitly exported public crate API. - If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`. - If you change Rust dependencies (`Cargo.toml` or `Cargo.lock`), run `just bazel-lock-update` from the repo root to refresh `MODULE.bazel.lock`, and include that lockfile update in the same change. diff --git a/MODULE.bazel b/MODULE.bazel index b42936fde5..bcc4ec076c 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -228,10 +228,18 @@ inject_repo(crate, "zstd") use_repo(crate, "argument_comment_lint_crates") bazel_dep(name = "bzip2", version = "1.0.8.bcr.3") +single_version_override( + module_name = "bzip2", + patch_strip = 1, + patches = [ + "//patches:bzip2_windows_stack_args.patch", + ], +) crate.annotation( crate = "bzip2-sys", - gen_build_script = "on", + gen_build_script = "off", + deps = ["@bzip2//:bz2"], ) inject_repo(crate, "bzip2") @@ -245,14 +253,25 @@ crate.annotation( inject_repo(crate, "zlib") -# TODO(zbarsky): Enable annotation after fixing windows arm64 builds. 
+bazel_dep(name = "xz", version = "5.4.5.bcr.8") +single_version_override( + module_name = "xz", + patch_strip = 1, + patches = [ + "//patches:xz_windows_stack_args.patch", + ], +) + crate.annotation( crate = "lzma-sys", - gen_build_script = "on", + gen_build_script = "off", + deps = ["@xz//:lzma"], ) bazel_dep(name = "openssl", version = "3.5.4.bcr.0") +inject_repo(crate, "xz") + crate.annotation( build_script_data = [ "@openssl//:gen_dir", diff --git a/MODULE.bazel.lock b/MODULE.bazel.lock index 1e2123ec49..746679af39 100644 --- a/MODULE.bazel.lock +++ b/MODULE.bazel.lock @@ -228,6 +228,8 @@ "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43", "https://bcr.bazel.build/modules/with_cfg.bzl/0.12.0/MODULE.bazel": "b573395fe63aef4299ba095173e2f62ccfee5ad9bbf7acaa95dba73af9fc2b38", "https://bcr.bazel.build/modules/with_cfg.bzl/0.12.0/source.json": "3f3fbaeafecaf629877ad152a2c9def21f8d330d91aa94c5dc75bbb98c10b8b8", + "https://bcr.bazel.build/modules/xz/5.4.5.bcr.8/MODULE.bazel": "e48a69bd54053c2ec5fffc2a29fb70122afd3e83ab6c07068f63bc6553fa57cc", + "https://bcr.bazel.build/modules/xz/5.4.5.bcr.8/source.json": "bd7e928ccd63505b44f4784f7bbf12cc11f9ff23bf3ca12ff2c91cd74846099e", "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0", "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/MODULE.bazel": "eec517b5bbe5492629466e11dae908d043364302283de25581e3eb944326c4ca", "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.8/MODULE.bazel": "772c674bb78a0342b8caf32ab5c25085c493ca4ff08398208dcbe4375fe9f776", diff --git a/README.md b/README.md index 1e44875f21..5cc7fd4953 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Each archive contains a single entry with the platform baked into the name (e.g. ### Using Codex with your ChatGPT plan -Run `codex` and select **Sign in with ChatGPT**. 
We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt). +Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Business, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt). You can also use Codex with an API key, but this requires [additional setup](https://developers.openai.com/codex/auth#sign-in-with-an-api-key). diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index f45a752e0d..9c06deae5c 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -408,6 +408,7 @@ dependencies = [ "base64 0.22.1", "chrono", "codex-app-server-protocol", + "codex-config", "codex-core", "codex-features", "codex-login", @@ -1433,9 +1434,11 @@ dependencies = [ "codex-utils-cli", "codex-utils-json-to-toml", "codex-utils-pty", + "codex-utils-rustls-provider", "constant_time_eq", "core_test_support", "futures", + "gethostname", "hmac", "jsonwebtoken", "opentelemetry", @@ -1458,6 +1461,7 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber", + "url", "uuid", "wiremock", ] @@ -1492,6 +1496,7 @@ dependencies = [ "codex-experimental-api-macros", "codex-git-utils", "codex-protocol", + "codex-shell-command", "codex-utils-absolute-path", "codex-utils-cargo-bin", "inventory", @@ -1501,7 +1506,6 @@ dependencies = [ "serde", "serde_json", "serde_with", - "shlex", "similar", "strum_macros 0.28.0", "tempfile", @@ -1539,11 +1543,14 @@ dependencies = [ "anyhow", "assert_cmd", "assert_matches", + "codex-exec-server", + "codex-utils-absolute-path", "codex-utils-cargo-bin", "pretty_assertions", "similar", "tempfile", "thiserror 2.0.18", + "tokio", "tree-sitter", "tree-sitter-bash", ] @@ -1554,9 +1561,11 @@ version = 
"0.0.0" dependencies = [ "anyhow", "codex-apply-patch", + "codex-exec-server", "codex-linux-sandbox", "codex-sandboxing", "codex-shell-escalation", + "codex-utils-absolute-path", "codex-utils-home-dir", "dotenvy", "tempfile", @@ -1603,6 +1612,7 @@ version = "0.0.0" dependencies = [ "anyhow", "clap", + "codex-config", "codex-connectors", "codex-core", "codex-git-utils", @@ -1705,6 +1715,7 @@ dependencies = [ "base64 0.22.1", "chrono", "codex-backend-client", + "codex-config", "codex-core", "codex-login", "codex-otel", @@ -1802,8 +1813,13 @@ dependencies = [ "anyhow", "codex-app-server-protocol", "codex-execpolicy", + "codex-features", + "codex-git-utils", + "codex-model-provider-info", + "codex-network-proxy", "codex-protocol", "codex-utils-absolute-path", + "dunce", "futures", "multimap", "pretty_assertions", @@ -1923,7 +1939,6 @@ dependencies = [ "regex-lite", "reqwest", "rmcp", - "schemars 0.8.22", "serde", "serde_json", "serial_test", @@ -2114,7 +2129,6 @@ dependencies = [ name = "codex-features" version = "0.0.0" dependencies = [ - "codex-login", "codex-otel", "codex-protocol", "pretty_assertions", @@ -2270,7 +2284,6 @@ dependencies = [ "rand 0.9.2", "regex-lite", "reqwest", - "schemars 0.8.22", "serde", "serde_json", "serial_test", @@ -2373,6 +2386,7 @@ dependencies = [ "codex-api", "codex-app-server-protocol", "codex-collaboration-mode-templates", + "codex-config", "codex-feedback", "codex-login", "codex-model-provider-info", @@ -2568,6 +2582,7 @@ dependencies = [ "anyhow", "axum", "codex-client", + "codex-config", "codex-keyring-store", "codex-protocol", "codex-utils-cargo-bin", @@ -2579,7 +2594,6 @@ dependencies = [ "pretty_assertions", "reqwest", "rmcp", - "schemars 0.8.22", "serde", "serde_json", "serial_test", @@ -2787,6 +2801,7 @@ dependencies = [ "codex-cloud-requirements", "codex-config", "codex-core", + "codex-exec-server", "codex-features", "codex-feedback", "codex-file-search", @@ -2870,7 +2885,6 @@ name = "codex-utils-absolute-path" version = 
"0.0.0" dependencies = [ "dirs", - "path-absolutize", "pretty_assertions", "schemars 0.8.22", "serde", diff --git a/codex-rs/app-server-client/src/lib.rs b/codex-rs/app-server-client/src/lib.rs index 39768820de..f7f24eacdf 100644 --- a/codex-rs/app-server-client/src/lib.rs +++ b/codex-rs/app-server-client/src/lib.rs @@ -1060,6 +1060,9 @@ mod tests { items: Vec::new(), status: codex_app_server_protocol::TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: Some(1), }, }) } @@ -1834,6 +1837,9 @@ mod tests { items: Vec::new(), status: codex_app_server_protocol::TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, } ) diff --git a/codex-rs/app-server-protocol/Cargo.toml b/codex-rs/app-server-protocol/Cargo.toml index a9d90831ef..d9ed5e8730 100644 --- a/codex-rs/app-server-protocol/Cargo.toml +++ b/codex-rs/app-server-protocol/Cargo.toml @@ -17,12 +17,12 @@ clap = { workspace = true, features = ["derive"] } codex-experimental-api-macros = { workspace = true } codex-git-utils = { workspace = true } codex-protocol = { workspace = true } +codex-shell-command = { workspace = true } codex-utils-absolute-path = { workspace = true } schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } serde_with = { workspace = true } -shlex = { workspace = true } strum_macros = { workspace = true } thiserror = { workspace = true } rmcp = { workspace = true, default-features = false, features = [ diff --git a/codex-rs/app-server-protocol/schema/json/ClientRequest.json b/codex-rs/app-server-protocol/schema/json/ClientRequest.json index 7c94419844..5039a0bfe7 100644 --- a/codex-rs/app-server-protocol/schema/json/ClientRequest.json +++ b/codex-rs/app-server-protocol/schema/json/ClientRequest.json @@ -800,7 +800,7 @@ "description": "Stop filesystem watch notifications for a prior `fs/watch`.", "properties": { "watchId": { - "description": 
"Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -819,10 +819,15 @@ } ], "description": "Absolute file or directory path to watch." + }, + "watchId": { + "description": "Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`.", + "type": "string" } }, "required": [ - "path" + "path", + "watchId" ], "type": "object" }, @@ -1042,6 +1047,17 @@ "null" ] }, + "detail": { + "anyOf": [ + { + "$ref": "#/definitions/McpServerStatusDetail" + }, + { + "type": "null" + } + ], + "description": "Controls how much MCP inventory data to fetch for each server. Defaults to `Full` when omitted." + }, "limit": { "description": "Optional page size; defaults to a server-defined value.", "format": "uint32", @@ -1208,6 +1224,25 @@ } ] }, + "McpResourceReadParams": { + "properties": { + "server": { + "type": "string" + }, + "threadId": { + "type": "string" + }, + "uri": { + "type": "string" + } + }, + "required": [ + "server", + "threadId", + "uri" + ], + "type": "object" + }, "McpServerOauthLoginParams": { "properties": { "name": { @@ -1235,6 +1270,13 @@ ], "type": "object" }, + "McpServerStatusDetail": { + "enum": [ + "full", + "toolsAndAuthOnly" + ], + "type": "string" + }, "MergeStrategy": { "enum": [ "replace", @@ -4420,6 +4462,30 @@ "title": "McpServerStatus/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": "#/definitions/RequestId" + }, + "method": { + "enum": [ + "mcpServer/resource/read" + ], + "title": "McpServer/resource/readRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/McpResourceReadParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "McpServer/resource/readRequest", + "type": "object" + }, { "properties": { "id": { diff --git a/codex-rs/app-server-protocol/schema/json/ServerNotification.json b/codex-rs/app-server-protocol/schema/json/ServerNotification.json index 
b8b539a03b..d550f523ce 100644 --- a/codex-rs/app-server-protocol/schema/json/ServerNotification.json +++ b/codex-rs/app-server-protocol/schema/json/ServerNotification.json @@ -1013,7 +1013,7 @@ "type": "array" }, "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -1736,6 +1736,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -3532,6 +3533,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -3553,6 +3570,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json index a589903032..da21699875 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json @@ -1201,6 +1201,30 @@ "title": "McpServerStatus/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": "#/definitions/v2/RequestId" + }, + "method": { + "enum": [ + "mcpServer/resource/read" + ], + "title": "McpServer/resource/readRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/v2/McpResourceReadParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": 
"McpServer/resource/readRequest", + "type": "object" + }, { "properties": { "id": { @@ -7487,7 +7511,7 @@ "type": "array" }, "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -7757,7 +7781,7 @@ "description": "Stop filesystem watch notifications for a prior `fs/watch`.", "properties": { "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -7784,25 +7808,6 @@ } ], "description": "Absolute file or directory path to watch." - } - }, - "required": [ - "path" - ], - "title": "FsWatchParams", - "type": "object" - }, - "FsWatchResponse": { - "$schema": "http://json-schema.org/draft-07/schema#", - "description": "Created watch handle returned by `fs/watch`.", - "properties": { - "path": { - "allOf": [ - { - "$ref": "#/definitions/v2/AbsolutePathBuf" - } - ], - "description": "Canonicalized path associated with the watch." }, "watchId": { "description": "Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`.", @@ -7813,6 +7818,25 @@ "path", "watchId" ], + "title": "FsWatchParams", + "type": "object" + }, + "FsWatchResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Successful response for `fs/watch`.", + "properties": { + "path": { + "allOf": [ + { + "$ref": "#/definitions/v2/AbsolutePathBuf" + } + ], + "description": "Canonicalized path associated with the watch." + } + }, + "required": [ + "path" + ], "title": "FsWatchResponse", "type": "object" }, @@ -8604,6 +8628,17 @@ "null" ] }, + "detail": { + "anyOf": [ + { + "$ref": "#/definitions/v2/McpServerStatusDetail" + }, + { + "type": "null" + } + ], + "description": "Controls how much MCP inventory data to fetch for each server. Defaults to `Full` when omitted." 
+ }, "limit": { "description": "Optional page size; defaults to a server-defined value.", "format": "uint32", @@ -8929,6 +8964,43 @@ ], "type": "string" }, + "McpResourceReadParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "server": { + "type": "string" + }, + "threadId": { + "type": "string" + }, + "uri": { + "type": "string" + } + }, + "required": [ + "server", + "threadId", + "uri" + ], + "title": "McpResourceReadParams", + "type": "object" + }, + "McpResourceReadResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "contents": { + "items": { + "$ref": "#/definitions/v2/ResourceContent" + }, + "type": "array" + } + }, + "required": [ + "contents" + ], + "title": "McpResourceReadResponse", + "type": "object" + }, "McpServerOauthLoginCompletedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -9044,6 +9116,13 @@ ], "type": "object" }, + "McpServerStatusDetail": { + "enum": [ + "full", + "toolsAndAuthOnly" + ], + "type": "string" + }, "McpServerStatusUpdatedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -9105,6 +9184,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -9473,6 +9553,12 @@ "null" ] }, + "dangerFullAccessDenylistOnly": { + "type": [ + "boolean", + "null" + ] + }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", @@ -10643,6 +10729,57 @@ ], "type": "object" }, + "ResourceContent": { + "anyOf": [ + { + "properties": { + "_meta": true, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "text": { + "type": "string" + }, + "uri": { + "description": "The URI of this resource.", + "type": "string" + } + }, + "required": [ + "text", + "uri" + ], + "type": "object" + }, + { + "properties": { + "_meta": true, + "blob": { + "type": "string" + }, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "uri": { + "description": "The 
URI of this resource.", + "type": "string" + } + }, + "required": [ + "blob", + "uri" + ], + "type": "object" + } + ], + "description": "Contents returned when reading a resource from an MCP server." + }, "ResourceTemplate": { "description": "A template description for resources available on the server.", "properties": { @@ -14329,6 +14466,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -14350,6 +14503,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/v2/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json index f041f8aae8..a66c34e14d 100644 --- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json +++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.v2.schemas.json @@ -1776,6 +1776,30 @@ "title": "McpServerStatus/listRequest", "type": "object" }, + { + "properties": { + "id": { + "$ref": "#/definitions/RequestId" + }, + "method": { + "enum": [ + "mcpServer/resource/read" + ], + "title": "McpServer/resource/readRequestMethod", + "type": "string" + }, + "params": { + "$ref": "#/definitions/McpResourceReadParams" + } + }, + "required": [ + "id", + "method", + "params" + ], + "title": "McpServer/resource/readRequest", + "type": "object" + }, { "properties": { "id": { @@ -4155,7 +4179,7 @@ "type": "array" }, "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + 
"description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -4425,7 +4449,7 @@ "description": "Stop filesystem watch notifications for a prior `fs/watch`.", "properties": { "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, @@ -4452,25 +4476,6 @@ } ], "description": "Absolute file or directory path to watch." - } - }, - "required": [ - "path" - ], - "title": "FsWatchParams", - "type": "object" - }, - "FsWatchResponse": { - "$schema": "http://json-schema.org/draft-07/schema#", - "description": "Created watch handle returned by `fs/watch`.", - "properties": { - "path": { - "allOf": [ - { - "$ref": "#/definitions/AbsolutePathBuf" - } - ], - "description": "Canonicalized path associated with the watch." }, "watchId": { "description": "Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`.", @@ -4481,6 +4486,25 @@ "path", "watchId" ], + "title": "FsWatchParams", + "type": "object" + }, + "FsWatchResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Successful response for `fs/watch`.", + "properties": { + "path": { + "allOf": [ + { + "$ref": "#/definitions/AbsolutePathBuf" + } + ], + "description": "Canonicalized path associated with the watch." + } + }, + "required": [ + "path" + ], "title": "FsWatchResponse", "type": "object" }, @@ -5427,6 +5451,17 @@ "null" ] }, + "detail": { + "anyOf": [ + { + "$ref": "#/definitions/McpServerStatusDetail" + }, + { + "type": "null" + } + ], + "description": "Controls how much MCP inventory data to fetch for each server. Defaults to `Full` when omitted." 
+ }, "limit": { "description": "Optional page size; defaults to a server-defined value.", "format": "uint32", @@ -5752,6 +5787,43 @@ ], "type": "string" }, + "McpResourceReadParams": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "server": { + "type": "string" + }, + "threadId": { + "type": "string" + }, + "uri": { + "type": "string" + } + }, + "required": [ + "server", + "threadId", + "uri" + ], + "title": "McpResourceReadParams", + "type": "object" + }, + "McpResourceReadResponse": { + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "contents": { + "items": { + "$ref": "#/definitions/ResourceContent" + }, + "type": "array" + } + }, + "required": [ + "contents" + ], + "title": "McpResourceReadResponse", + "type": "object" + }, "McpServerOauthLoginCompletedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -5867,6 +5939,13 @@ ], "type": "object" }, + "McpServerStatusDetail": { + "enum": [ + "full", + "toolsAndAuthOnly" + ], + "type": "string" + }, "McpServerStatusUpdatedNotification": { "$schema": "http://json-schema.org/draft-07/schema#", "properties": { @@ -5928,6 +6007,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -6296,6 +6376,12 @@ "null" ] }, + "dangerFullAccessDenylistOnly": { + "type": [ + "boolean", + "null" + ] + }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", @@ -7466,6 +7552,57 @@ ], "type": "object" }, + "ResourceContent": { + "anyOf": [ + { + "properties": { + "_meta": true, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "text": { + "type": "string" + }, + "uri": { + "description": "The URI of this resource.", + "type": "string" + } + }, + "required": [ + "text", + "uri" + ], + "type": "object" + }, + { + "properties": { + "_meta": true, + "blob": { + "type": "string" + }, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "uri": { + "description": "The URI of 
this resource.", + "type": "string" + } + }, + "required": [ + "blob", + "uri" + ], + "type": "object" + } + ], + "description": "Contents returned when reading a resource from an MCP server." + }, "ResourceTemplate": { "description": "A template description for resources available on the server.", "properties": { @@ -12184,6 +12321,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -12205,6 +12358,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json index 614575a955..ae6eb1dc7d 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ConfigRequirementsReadResponse.json @@ -151,6 +151,12 @@ "null" ] }, + "dangerFullAccessDenylistOnly": { + "type": [ + "boolean", + "null" + ] + }, "dangerouslyAllowAllUnixSockets": { "type": [ "boolean", diff --git a/codex-rs/app-server-protocol/schema/json/v2/FsChangedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/FsChangedNotification.json index ab26588ecb..cfb9f4e5ba 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/FsChangedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/FsChangedNotification.json @@ -16,7 +16,7 @@ "type": "array" }, "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": 
"Watch identifier previously provided to `fs/watch`.", "type": "string" } }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/FsUnwatchParams.json b/codex-rs/app-server-protocol/schema/json/v2/FsUnwatchParams.json index 4b988d97aa..f46800e997 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/FsUnwatchParams.json +++ b/codex-rs/app-server-protocol/schema/json/v2/FsUnwatchParams.json @@ -3,7 +3,7 @@ "description": "Stop filesystem watch notifications for a prior `fs/watch`.", "properties": { "watchId": { - "description": "Watch identifier returned by `fs/watch`.", + "description": "Watch identifier previously provided to `fs/watch`.", "type": "string" } }, diff --git a/codex-rs/app-server-protocol/schema/json/v2/FsWatchParams.json b/codex-rs/app-server-protocol/schema/json/v2/FsWatchParams.json index cf80c7b101..29a1ceea14 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/FsWatchParams.json +++ b/codex-rs/app-server-protocol/schema/json/v2/FsWatchParams.json @@ -15,10 +15,15 @@ } ], "description": "Absolute file or directory path to watch." + }, + "watchId": { + "description": "Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`.", + "type": "string" } }, "required": [ - "path" + "path", + "watchId" ], "title": "FsWatchParams", "type": "object" diff --git a/codex-rs/app-server-protocol/schema/json/v2/FsWatchResponse.json b/codex-rs/app-server-protocol/schema/json/v2/FsWatchResponse.json index b516636a09..abc7d466bb 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/FsWatchResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/FsWatchResponse.json @@ -6,7 +6,7 @@ "type": "string" } }, - "description": "Created watch handle returned by `fs/watch`.", + "description": "Successful response for `fs/watch`.", "properties": { "path": { "allOf": [ @@ -15,15 +15,10 @@ } ], "description": "Canonicalized path associated with the watch." 
- }, - "watchId": { - "description": "Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`.", - "type": "string" } }, "required": [ - "path", - "watchId" + "path" ], "title": "FsWatchResponse", "type": "object" diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json index 3964107865..2883670c88 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemCompletedNotification.json @@ -294,6 +294,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" diff --git a/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json index abb8aee5dc..c2e71ccba9 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ItemStartedNotification.json @@ -294,6 +294,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" diff --git a/codex-rs/app-server-protocol/schema/json/v2/ListMcpServerStatusParams.json b/codex-rs/app-server-protocol/schema/json/v2/ListMcpServerStatusParams.json index e78dbeac16..52149d9fb8 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ListMcpServerStatusParams.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ListMcpServerStatusParams.json @@ -1,5 +1,14 @@ { "$schema": "http://json-schema.org/draft-07/schema#", + "definitions": { + "McpServerStatusDetail": { + "enum": [ + "full", + "toolsAndAuthOnly" + ], + "type": "string" + } + }, "properties": { "cursor": { "description": "Opaque pagination cursor returned by a previous call.", @@ -8,6 +17,17 @@ "null" ] }, + "detail": { + "anyOf": [ + { + "$ref": "#/definitions/McpServerStatusDetail" + }, + { + "type": "null" + } + ], 
+ "description": "Controls how much MCP inventory data to fetch for each server. Defaults to `Full` when omitted." + }, "limit": { "description": "Optional page size; defaults to a server-defined value.", "format": "uint32", diff --git a/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadParams.json b/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadParams.json new file mode 100644 index 0000000000..0242d4148b --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadParams.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "properties": { + "server": { + "type": "string" + }, + "threadId": { + "type": "string" + }, + "uri": { + "type": "string" + } + }, + "required": [ + "server", + "threadId", + "uri" + ], + "title": "McpResourceReadParams", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadResponse.json new file mode 100644 index 0000000000..b1a4012344 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/json/v2/McpResourceReadResponse.json @@ -0,0 +1,69 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "definitions": { + "ResourceContent": { + "anyOf": [ + { + "properties": { + "_meta": true, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "text": { + "type": "string" + }, + "uri": { + "description": "The URI of this resource.", + "type": "string" + } + }, + "required": [ + "text", + "uri" + ], + "type": "object" + }, + { + "properties": { + "_meta": true, + "blob": { + "type": "string" + }, + "mimeType": { + "type": [ + "string", + "null" + ] + }, + "uri": { + "description": "The URI of this resource.", + "type": "string" + } + }, + "required": [ + "blob", + "uri" + ], + "type": "object" + } + ], + "description": "Contents returned when reading a resource from an MCP server." 
+ } + }, + "properties": { + "contents": { + "items": { + "$ref": "#/definitions/ResourceContent" + }, + "type": "array" + } + }, + "required": [ + "contents" + ], + "title": "McpResourceReadResponse", + "type": "object" +} \ No newline at end of file diff --git a/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json index 2e0c3605e7..a7fe2e8d60 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ReviewStartResponse.json @@ -430,6 +430,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1267,6 +1268,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1288,6 +1305,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json index 88448a1658..9b1870198a 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadForkResponse.json @@ -518,6 +518,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1856,6 +1857,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ 
+ "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1877,6 +1894,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json index f26bd03a34..426f34ce35 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadListResponse.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json index 88c8e688df..c869a79749 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadMetadataUpdateResponse.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": 
{ "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json index 8453207380..9569860c38 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadReadResponse.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json index 
e21f253b72..5143545ec1 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeResponse.json @@ -518,6 +518,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1856,6 +1857,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1877,6 +1894,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json index d719ba7d8f..502dd3961f 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadRollbackResponse.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ 
+ "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json index 27a8cdd6bf..c07b4d8258 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartResponse.json @@ -518,6 +518,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1856,6 +1857,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1877,6 +1894,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json index c202363e3b..ff87af2069 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartedNotification.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + 
"format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json index 542aea1765..daf821c374 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadUnarchiveResponse.json @@ -456,6 +456,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1614,6 +1615,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1635,6 +1652,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json index 770cc920cf..82c2b3c76c 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json +++ b/codex-rs/app-server-protocol/schema/json/v2/TurnCompletedNotification.json @@ -430,6 +430,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1267,6 +1268,22 @@ }, "Turn": { "properties": { + 
"completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1288,6 +1305,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json b/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json index 7f1c3e4948..ebb2065cb8 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json +++ b/codex-rs/app-server-protocol/schema/json/v2/TurnStartResponse.json @@ -430,6 +430,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1267,6 +1268,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1288,6 +1305,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json b/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json index 761ddc9a62..8b7c2bc410 100644 --- a/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json +++ 
b/codex-rs/app-server-protocol/schema/json/v2/TurnStartedNotification.json @@ -430,6 +430,7 @@ }, "McpToolCallResult": { "properties": { + "_meta": true, "content": { "items": true, "type": "array" @@ -1267,6 +1268,22 @@ }, "Turn": { "properties": { + "completedAt": { + "description": "Unix timestamp (in seconds) when the turn completed.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, + "durationMs": { + "description": "Duration between turn start and completion in milliseconds, if known.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "error": { "anyOf": [ { @@ -1288,6 +1305,14 @@ }, "type": "array" }, + "startedAt": { + "description": "Unix timestamp (in seconds) when the turn started.", + "format": "int64", + "type": [ + "integer", + "null" + ] + }, "status": { "$ref": "#/definitions/TurnStatus" } diff --git a/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts b/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts index e33a986359..d12599d7a9 100644 --- a/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts +++ b/codex-rs/app-server-protocol/schema/typescript/ClientRequest.ts @@ -33,6 +33,7 @@ import type { FsWriteFileParams } from "./v2/FsWriteFileParams"; import type { GetAccountParams } from "./v2/GetAccountParams"; import type { ListMcpServerStatusParams } from "./v2/ListMcpServerStatusParams"; import type { LoginAccountParams } from "./v2/LoginAccountParams"; +import type { McpResourceReadParams } from "./v2/McpResourceReadParams"; import type { McpServerOauthLoginParams } from "./v2/McpServerOauthLoginParams"; import type { ModelListParams } from "./v2/ModelListParams"; import type { PluginInstallParams } from "./v2/PluginInstallParams"; @@ -64,4 +65,4 @@ import type { WindowsSandboxSetupStartParams } from "./v2/WindowsSandboxSetupSta /** * Request from the client to the server. 
*/ -export type ClientRequest ={ "method": "initialize", id: RequestId, params: InitializeParams, } | { "method": "thread/start", id: RequestId, params: ThreadStartParams, } | { "method": "thread/resume", id: RequestId, params: ThreadResumeParams, } | { "method": "thread/fork", id: RequestId, params: ThreadForkParams, } | { "method": "thread/archive", id: RequestId, params: ThreadArchiveParams, } | { "method": "thread/unsubscribe", id: RequestId, params: ThreadUnsubscribeParams, } | { "method": "thread/name/set", id: RequestId, params: ThreadSetNameParams, } | { "method": "thread/metadata/update", id: RequestId, params: ThreadMetadataUpdateParams, } | { "method": "thread/unarchive", id: RequestId, params: ThreadUnarchiveParams, } | { "method": "thread/compact/start", id: RequestId, params: ThreadCompactStartParams, } | { "method": "thread/shellCommand", id: RequestId, params: ThreadShellCommandParams, } | { "method": "thread/rollback", id: RequestId, params: ThreadRollbackParams, } | { "method": "thread/list", id: RequestId, params: ThreadListParams, } | { "method": "thread/loaded/list", id: RequestId, params: ThreadLoadedListParams, } | { "method": "thread/read", id: RequestId, params: ThreadReadParams, } | { "method": "skills/list", id: RequestId, params: SkillsListParams, } | { "method": "plugin/list", id: RequestId, params: PluginListParams, } | { "method": "plugin/read", id: RequestId, params: PluginReadParams, } | { "method": "app/list", id: RequestId, params: AppsListParams, } | { "method": "fs/readFile", id: RequestId, params: FsReadFileParams, } | { "method": "fs/writeFile", id: RequestId, params: FsWriteFileParams, } | { "method": "fs/createDirectory", id: RequestId, params: FsCreateDirectoryParams, } | { "method": "fs/getMetadata", id: RequestId, params: FsGetMetadataParams, } | { "method": "fs/readDirectory", id: RequestId, params: FsReadDirectoryParams, } | { "method": "fs/remove", id: RequestId, params: FsRemoveParams, } | { "method": "fs/copy", id: 
RequestId, params: FsCopyParams, } | { "method": "fs/watch", id: RequestId, params: FsWatchParams, } | { "method": "fs/unwatch", id: RequestId, params: FsUnwatchParams, } | { "method": "skills/config/write", id: RequestId, params: SkillsConfigWriteParams, } | { "method": "plugin/install", id: RequestId, params: PluginInstallParams, } | { "method": "plugin/uninstall", id: RequestId, params: PluginUninstallParams, } | { "method": "turn/start", id: RequestId, params: TurnStartParams, } | { "method": "turn/steer", id: RequestId, params: TurnSteerParams, } | { "method": "turn/interrupt", id: RequestId, params: TurnInterruptParams, } | { "method": "review/start", id: RequestId, params: ReviewStartParams, } | { "method": "model/list", id: RequestId, params: ModelListParams, } | { "method": "experimentalFeature/list", id: RequestId, params: ExperimentalFeatureListParams, } | { "method": "experimentalFeature/enablement/set", id: RequestId, params: ExperimentalFeatureEnablementSetParams, } | { "method": "mcpServer/oauth/login", id: RequestId, params: McpServerOauthLoginParams, } | { "method": "config/mcpServer/reload", id: RequestId, params: undefined, } | { "method": "mcpServerStatus/list", id: RequestId, params: ListMcpServerStatusParams, } | { "method": "windowsSandbox/setupStart", id: RequestId, params: WindowsSandboxSetupStartParams, } | { "method": "account/login/start", id: RequestId, params: LoginAccountParams, } | { "method": "account/login/cancel", id: RequestId, params: CancelLoginAccountParams, } | { "method": "account/logout", id: RequestId, params: undefined, } | { "method": "account/rateLimits/read", id: RequestId, params: undefined, } | { "method": "feedback/upload", id: RequestId, params: FeedbackUploadParams, } | { "method": "command/exec", id: RequestId, params: CommandExecParams, } | { "method": "command/exec/write", id: RequestId, params: CommandExecWriteParams, } | { "method": "command/exec/terminate", id: RequestId, params: CommandExecTerminateParams, 
} | { "method": "command/exec/resize", id: RequestId, params: CommandExecResizeParams, } | { "method": "config/read", id: RequestId, params: ConfigReadParams, } | { "method": "externalAgentConfig/detect", id: RequestId, params: ExternalAgentConfigDetectParams, } | { "method": "externalAgentConfig/import", id: RequestId, params: ExternalAgentConfigImportParams, } | { "method": "config/value/write", id: RequestId, params: ConfigValueWriteParams, } | { "method": "config/batchWrite", id: RequestId, params: ConfigBatchWriteParams, } | { "method": "configRequirements/read", id: RequestId, params: undefined, } | { "method": "account/read", id: RequestId, params: GetAccountParams, } | { "method": "getConversationSummary", id: RequestId, params: GetConversationSummaryParams, } | { "method": "gitDiffToRemote", id: RequestId, params: GitDiffToRemoteParams, } | { "method": "getAuthStatus", id: RequestId, params: GetAuthStatusParams, } | { "method": "fuzzyFileSearch", id: RequestId, params: FuzzyFileSearchParams, }; +export type ClientRequest ={ "method": "initialize", id: RequestId, params: InitializeParams, } | { "method": "thread/start", id: RequestId, params: ThreadStartParams, } | { "method": "thread/resume", id: RequestId, params: ThreadResumeParams, } | { "method": "thread/fork", id: RequestId, params: ThreadForkParams, } | { "method": "thread/archive", id: RequestId, params: ThreadArchiveParams, } | { "method": "thread/unsubscribe", id: RequestId, params: ThreadUnsubscribeParams, } | { "method": "thread/name/set", id: RequestId, params: ThreadSetNameParams, } | { "method": "thread/metadata/update", id: RequestId, params: ThreadMetadataUpdateParams, } | { "method": "thread/unarchive", id: RequestId, params: ThreadUnarchiveParams, } | { "method": "thread/compact/start", id: RequestId, params: ThreadCompactStartParams, } | { "method": "thread/shellCommand", id: RequestId, params: ThreadShellCommandParams, } | { "method": "thread/rollback", id: RequestId, params: 
ThreadRollbackParams, } | { "method": "thread/list", id: RequestId, params: ThreadListParams, } | { "method": "thread/loaded/list", id: RequestId, params: ThreadLoadedListParams, } | { "method": "thread/read", id: RequestId, params: ThreadReadParams, } | { "method": "skills/list", id: RequestId, params: SkillsListParams, } | { "method": "plugin/list", id: RequestId, params: PluginListParams, } | { "method": "plugin/read", id: RequestId, params: PluginReadParams, } | { "method": "app/list", id: RequestId, params: AppsListParams, } | { "method": "fs/readFile", id: RequestId, params: FsReadFileParams, } | { "method": "fs/writeFile", id: RequestId, params: FsWriteFileParams, } | { "method": "fs/createDirectory", id: RequestId, params: FsCreateDirectoryParams, } | { "method": "fs/getMetadata", id: RequestId, params: FsGetMetadataParams, } | { "method": "fs/readDirectory", id: RequestId, params: FsReadDirectoryParams, } | { "method": "fs/remove", id: RequestId, params: FsRemoveParams, } | { "method": "fs/copy", id: RequestId, params: FsCopyParams, } | { "method": "fs/watch", id: RequestId, params: FsWatchParams, } | { "method": "fs/unwatch", id: RequestId, params: FsUnwatchParams, } | { "method": "skills/config/write", id: RequestId, params: SkillsConfigWriteParams, } | { "method": "plugin/install", id: RequestId, params: PluginInstallParams, } | { "method": "plugin/uninstall", id: RequestId, params: PluginUninstallParams, } | { "method": "turn/start", id: RequestId, params: TurnStartParams, } | { "method": "turn/steer", id: RequestId, params: TurnSteerParams, } | { "method": "turn/interrupt", id: RequestId, params: TurnInterruptParams, } | { "method": "review/start", id: RequestId, params: ReviewStartParams, } | { "method": "model/list", id: RequestId, params: ModelListParams, } | { "method": "experimentalFeature/list", id: RequestId, params: ExperimentalFeatureListParams, } | { "method": "experimentalFeature/enablement/set", id: RequestId, params: 
ExperimentalFeatureEnablementSetParams, } | { "method": "mcpServer/oauth/login", id: RequestId, params: McpServerOauthLoginParams, } | { "method": "config/mcpServer/reload", id: RequestId, params: undefined, } | { "method": "mcpServerStatus/list", id: RequestId, params: ListMcpServerStatusParams, } | { "method": "mcpServer/resource/read", id: RequestId, params: McpResourceReadParams, } | { "method": "windowsSandbox/setupStart", id: RequestId, params: WindowsSandboxSetupStartParams, } | { "method": "account/login/start", id: RequestId, params: LoginAccountParams, } | { "method": "account/login/cancel", id: RequestId, params: CancelLoginAccountParams, } | { "method": "account/logout", id: RequestId, params: undefined, } | { "method": "account/rateLimits/read", id: RequestId, params: undefined, } | { "method": "feedback/upload", id: RequestId, params: FeedbackUploadParams, } | { "method": "command/exec", id: RequestId, params: CommandExecParams, } | { "method": "command/exec/write", id: RequestId, params: CommandExecWriteParams, } | { "method": "command/exec/terminate", id: RequestId, params: CommandExecTerminateParams, } | { "method": "command/exec/resize", id: RequestId, params: CommandExecResizeParams, } | { "method": "config/read", id: RequestId, params: ConfigReadParams, } | { "method": "externalAgentConfig/detect", id: RequestId, params: ExternalAgentConfigDetectParams, } | { "method": "externalAgentConfig/import", id: RequestId, params: ExternalAgentConfigImportParams, } | { "method": "config/value/write", id: RequestId, params: ConfigValueWriteParams, } | { "method": "config/batchWrite", id: RequestId, params: ConfigBatchWriteParams, } | { "method": "configRequirements/read", id: RequestId, params: undefined, } | { "method": "account/read", id: RequestId, params: GetAccountParams, } | { "method": "getConversationSummary", id: RequestId, params: GetConversationSummaryParams, } | { "method": "gitDiffToRemote", id: RequestId, params: GitDiffToRemoteParams, } | { 
"method": "getAuthStatus", id: RequestId, params: GetAuthStatusParams, } | { "method": "fuzzyFileSearch", id: RequestId, params: FuzzyFileSearchParams, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/ResourceContent.ts b/codex-rs/app-server-protocol/schema/typescript/ResourceContent.ts new file mode 100644 index 0000000000..60fe239dc4 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/ResourceContent.ts @@ -0,0 +1,17 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { JsonValue } from "./serde_json/JsonValue"; + +/** + * Contents returned when reading a resource from an MCP server. + */ +export type ResourceContent = { +/** + * The URI of this resource. + */ +uri: string, mimeType?: string, text: string, _meta?: JsonValue, } | { +/** + * The URI of this resource. + */ +uri: string, mimeType?: string, blob: string, _meta?: JsonValue, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/index.ts b/codex-rs/app-server-protocol/schema/typescript/index.ts index 09c388337f..7ffc15e83d 100644 --- a/codex-rs/app-server-protocol/schema/typescript/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/index.ts @@ -55,6 +55,7 @@ export type { ReasoningItemReasoningSummary } from "./ReasoningItemReasoningSumm export type { ReasoningSummary } from "./ReasoningSummary"; export type { RequestId } from "./RequestId"; export type { Resource } from "./Resource"; +export type { ResourceContent } from "./ResourceContent"; export type { ResourceTemplate } from "./ResourceTemplate"; export type { ResponseItem } from "./ResponseItem"; export type { ReviewDecision } from "./ReviewDecision"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/FsChangedNotification.ts b/codex-rs/app-server-protocol/schema/typescript/v2/FsChangedNotification.ts index 2e9bd0d6ec..fc0fa9cf6d 100644 --- 
a/codex-rs/app-server-protocol/schema/typescript/v2/FsChangedNotification.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/FsChangedNotification.ts @@ -8,7 +8,7 @@ import type { AbsolutePathBuf } from "../AbsolutePathBuf"; */ export type FsChangedNotification = { /** - * Watch identifier returned by `fs/watch`. + * Watch identifier previously provided to `fs/watch`. */ watchId: string, /** diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/FsUnwatchParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/FsUnwatchParams.ts index b21befdb5c..34b70a9307 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/FsUnwatchParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/FsUnwatchParams.ts @@ -7,6 +7,6 @@ */ export type FsUnwatchParams = { /** - * Watch identifier returned by `fs/watch`. + * Watch identifier previously provided to `fs/watch`. */ watchId: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchParams.ts index d6d956b288..60755538cb 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchParams.ts @@ -7,6 +7,10 @@ import type { AbsolutePathBuf } from "../AbsolutePathBuf"; * Start filesystem watch notifications for an absolute path. */ export type FsWatchParams = { +/** + * Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`. + */ +watchId: string, /** * Absolute file or directory path to watch. 
*/ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchResponse.ts index 1927272821..b640306d82 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchResponse.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/FsWatchResponse.ts @@ -4,13 +4,9 @@ import type { AbsolutePathBuf } from "../AbsolutePathBuf"; /** - * Created watch handle returned by `fs/watch`. + * Successful response for `fs/watch`. */ export type FsWatchResponse = { -/** - * Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`. - */ -watchId: string, /** * Canonicalized path associated with the watch. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ListMcpServerStatusParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ListMcpServerStatusParams.ts index 05c02c19f8..8225c462b1 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ListMcpServerStatusParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ListMcpServerStatusParams.ts @@ -1,6 +1,7 @@ // GENERATED CODE! DO NOT MODIFY BY HAND! // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { McpServerStatusDetail } from "./McpServerStatusDetail"; export type ListMcpServerStatusParams = { /** @@ -10,4 +11,9 @@ cursor?: string | null, /** * Optional page size; defaults to a server-defined value. */ -limit?: number | null, }; +limit?: number | null, +/** + * Controls how much MCP inventory data to fetch for each server. + * Defaults to `Full` when omitted. 
+ */ +detail?: McpServerStatusDetail | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadParams.ts new file mode 100644 index 0000000000..51d650d9bb --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadParams.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type McpResourceReadParams = { threadId: string, server: string, uri: string, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadResponse.ts b/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadResponse.ts new file mode 100644 index 0000000000..2af1dbcd09 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/McpResourceReadResponse.ts @@ -0,0 +1,6 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { ResourceContent } from "../ResourceContent"; + +export type McpResourceReadResponse = { contents: Array, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/McpServerStatusDetail.ts b/codex-rs/app-server-protocol/schema/typescript/v2/McpServerStatusDetail.ts new file mode 100644 index 0000000000..ab97cc2f31 --- /dev/null +++ b/codex-rs/app-server-protocol/schema/typescript/v2/McpServerStatusDetail.ts @@ -0,0 +1,5 @@ +// GENERATED CODE! DO NOT MODIFY BY HAND! + +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type McpServerStatusDetail = "full" | "toolsAndAuthOnly"; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/McpToolCallResult.ts b/codex-rs/app-server-protocol/schema/typescript/v2/McpToolCallResult.ts index f493a86094..916a5f5bb3 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/McpToolCallResult.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/McpToolCallResult.ts @@ -3,4 +3,4 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. import type { JsonValue } from "../serde_json/JsonValue"; -export type McpToolCallResult = { content: Array, structuredContent: JsonValue | null, }; +export type McpToolCallResult = { content: Array, structuredContent: JsonValue | null, _meta: JsonValue | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts b/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts index 5fc942bef5..c685b9539c 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/NetworkRequirements.ts @@ -29,4 +29,4 @@ unixSockets: { [key in string]?: NetworkUnixSocketPermission } | null, /** * Legacy compatibility view derived from `unix_sockets`. 
*/ -allowUnixSockets: Array | null, allowLocalBinding: boolean | null, }; +allowUnixSockets: Array | null, allowLocalBinding: boolean | null, dangerFullAccessDenylistOnly: boolean | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkParams.ts index a7ba311803..952da565a6 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadForkParams.ts @@ -27,7 +27,7 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier * Override where approval requests are routed for review on this thread * and subsequent turns. */ -approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, ephemeral?: boolean, /** +approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null | null, developerInstructions?: string | null | null, ephemeral?: boolean, /** * If true, persist additional rollout EventMsg variants required to * reconstruct a richer thread history on subsequent resume/fork/read. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeParams.ts index 770344de8e..fcde0ae502 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadResumeParams.ts @@ -36,7 +36,7 @@ model?: string | null, modelProvider?: string | null, serviceTier?: ServiceTier * Override where approval requests are routed for review on this thread * and subsequent turns. 
*/ -approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, /** +approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, baseInstructions?: string | null | null, developerInstructions?: string | null | null, personality?: Personality | null, /** * If true, persist additional rollout EventMsg variants required to * reconstruct a richer thread history on subsequent resume/fork/read. */ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartParams.ts b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartParams.ts index 61f501ad60..c7b27823f6 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartParams.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/ThreadStartParams.ts @@ -12,7 +12,7 @@ export type ThreadStartParams = {model?: string | null, modelProvider?: string | * Override where approval requests are routed for review on this thread * and subsequent turns. */ -approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, serviceName?: string | null, baseInstructions?: string | null, developerInstructions?: string | null, personality?: Personality | null, ephemeral?: boolean | null, /** +approvalsReviewer?: ApprovalsReviewer | null, sandbox?: SandboxMode | null, config?: { [key in string]?: JsonValue } | null, serviceName?: string | null, baseInstructions?: string | null | null, developerInstructions?: string | null | null, personality?: Personality | null, ephemeral?: boolean | null, /** * If true, opt into emitting raw Responses API items on the event stream. * This is for internal use only (e.g. Codex Cloud). 
*/ diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/Turn.ts b/codex-rs/app-server-protocol/schema/typescript/v2/Turn.ts index 709ed5ccbe..074ac215fd 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/Turn.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/Turn.ts @@ -15,4 +15,16 @@ items: Array, status: TurnStatus, /** * Only populated when the Turn's status is failed. */ -error: TurnError | null, }; +error: TurnError | null, +/** + * Unix timestamp (in seconds) when the turn started. + */ +startedAt: number | null, +/** + * Unix timestamp (in seconds) when the turn completed. + */ +completedAt: number | null, +/** + * Duration between turn start and completion in milliseconds, if known. + */ +durationMs: number | null, }; diff --git a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts index 495572fd78..4e75a31e38 100644 --- a/codex-rs/app-server-protocol/schema/typescript/v2/index.ts +++ b/codex-rs/app-server-protocol/schema/typescript/v2/index.ts @@ -172,6 +172,8 @@ export type { McpElicitationTitledSingleSelectEnumSchema } from "./McpElicitatio export type { McpElicitationUntitledEnumItems } from "./McpElicitationUntitledEnumItems"; export type { McpElicitationUntitledMultiSelectEnumSchema } from "./McpElicitationUntitledMultiSelectEnumSchema"; export type { McpElicitationUntitledSingleSelectEnumSchema } from "./McpElicitationUntitledSingleSelectEnumSchema"; +export type { McpResourceReadParams } from "./McpResourceReadParams"; +export type { McpResourceReadResponse } from "./McpResourceReadResponse"; export type { McpServerElicitationAction } from "./McpServerElicitationAction"; export type { McpServerElicitationRequestParams } from "./McpServerElicitationRequestParams"; export type { McpServerElicitationRequestResponse } from "./McpServerElicitationRequestResponse"; @@ -181,6 +183,7 @@ export type { McpServerOauthLoginResponse } from 
"./McpServerOauthLoginResponse" export type { McpServerRefreshResponse } from "./McpServerRefreshResponse"; export type { McpServerStartupState } from "./McpServerStartupState"; export type { McpServerStatus } from "./McpServerStatus"; +export type { McpServerStatusDetail } from "./McpServerStatusDetail"; export type { McpServerStatusUpdatedNotification } from "./McpServerStatusUpdatedNotification"; export type { McpToolCallError } from "./McpToolCallError"; export type { McpToolCallProgressNotification } from "./McpToolCallProgressNotification"; diff --git a/codex-rs/app-server-protocol/src/lib.rs b/codex-rs/app-server-protocol/src/lib.rs index b1a8f474f3..d5c2f4b243 100644 --- a/codex-rs/app-server-protocol/src/lib.rs +++ b/codex-rs/app-server-protocol/src/lib.rs @@ -15,6 +15,7 @@ pub use export::generate_ts_with_options; pub use export::generate_types; pub use jsonrpc_lite::*; pub use protocol::common::*; +pub use protocol::item_builders::*; pub use protocol::thread_history::*; pub use protocol::v1::ApplyPatchApprovalParams; pub use protocol::v1::ApplyPatchApprovalResponse; diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index ebd1ba11c5..3ac96fa0eb 100644 --- a/codex-rs/app-server-protocol/src/protocol/common.rs +++ b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -459,6 +459,11 @@ client_request_definitions! 
{ response: v2::ListMcpServerStatusResponse, }, + McpResourceRead => "mcpServer/resource/read" { + params: v2::McpResourceReadParams, + response: v2::McpResourceReadResponse, + }, + WindowsSandboxSetupStart => "windowsSandbox/setupStart" { params: v2::WindowsSandboxSetupStartParams, response: v2::WindowsSandboxSetupStartResponse, @@ -1630,6 +1635,7 @@ mod tests { let request = ClientRequest::FsWatch { request_id: RequestId::Integer(10), params: v2::FsWatchParams { + watch_id: "watch-git".to_string(), path: absolute_path("tmp/repo/.git"), }, }; @@ -1638,6 +1644,7 @@ mod tests { "method": "fs/watch", "id": 10, "params": { + "watchId": "watch-git", "path": absolute_path_string("tmp/repo/.git") } }), diff --git a/codex-rs/app-server-protocol/src/protocol/item_builders.rs b/codex-rs/app-server-protocol/src/protocol/item_builders.rs new file mode 100644 index 0000000000..804169db6f --- /dev/null +++ b/codex-rs/app-server-protocol/src/protocol/item_builders.rs @@ -0,0 +1,299 @@ +//! Shared builders for synthetic [`ThreadItem`] values emitted by the app-server layer. +//! +//! These items do not come from first-class core `ItemStarted` / `ItemCompleted` events. +//! Instead, the app-server synthesizes them so clients can render a coherent lifecycle for +//! approvals and other pre-execution flows before the underlying tool has started or when the +//! tool never starts at all. +//! +//! Keeping these builders in one place is useful for two reasons: +//! - Live notifications and rebuilt `thread/read` history both need to construct the same +//! synthetic items, so sharing the logic avoids drift between those paths. +//! - The projection is presentation-specific. Core protocol events stay generic, while the +//! app-server protocol decides how to surface those events as `ThreadItem`s for clients. 
+use crate::protocol::common::ServerNotification; +use crate::protocol::v2::CommandAction; +use crate::protocol::v2::CommandExecutionSource; +use crate::protocol::v2::CommandExecutionStatus; +use crate::protocol::v2::FileUpdateChange; +use crate::protocol::v2::GuardianApprovalReview; +use crate::protocol::v2::GuardianApprovalReviewStatus; +use crate::protocol::v2::ItemGuardianApprovalReviewCompletedNotification; +use crate::protocol::v2::ItemGuardianApprovalReviewStartedNotification; +use crate::protocol::v2::PatchApplyStatus; +use crate::protocol::v2::PatchChangeKind; +use crate::protocol::v2::ThreadItem; +use codex_protocol::ThreadId; +use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; +use codex_protocol::protocol::ExecApprovalRequestEvent; +use codex_protocol::protocol::ExecCommandBeginEvent; +use codex_protocol::protocol::ExecCommandEndEvent; +use codex_protocol::protocol::FileChange; +use codex_protocol::protocol::GuardianAssessmentAction; +use codex_protocol::protocol::GuardianAssessmentEvent; +use codex_protocol::protocol::PatchApplyBeginEvent; +use codex_protocol::protocol::PatchApplyEndEvent; +use codex_shell_command::parse_command::parse_command; +use codex_shell_command::parse_command::shlex_join; +use std::collections::HashMap; +use std::path::PathBuf; + +pub fn build_file_change_approval_request_item( + payload: &ApplyPatchApprovalRequestEvent, +) -> ThreadItem { + ThreadItem::FileChange { + id: payload.call_id.clone(), + changes: convert_patch_changes(&payload.changes), + status: PatchApplyStatus::InProgress, + } +} + +pub fn build_file_change_begin_item(payload: &PatchApplyBeginEvent) -> ThreadItem { + ThreadItem::FileChange { + id: payload.call_id.clone(), + changes: convert_patch_changes(&payload.changes), + status: PatchApplyStatus::InProgress, + } +} + +pub fn build_file_change_end_item(payload: &PatchApplyEndEvent) -> ThreadItem { + ThreadItem::FileChange { + id: payload.call_id.clone(), + changes: 
convert_patch_changes(&payload.changes), + status: (&payload.status).into(), + } +} + +pub fn build_command_execution_approval_request_item( + payload: &ExecApprovalRequestEvent, +) -> ThreadItem { + ThreadItem::CommandExecution { + id: payload.call_id.clone(), + command: shlex_join(&payload.command), + cwd: payload.cwd.clone(), + process_id: None, + source: CommandExecutionSource::Agent, + status: CommandExecutionStatus::InProgress, + command_actions: payload + .parsed_cmd + .iter() + .cloned() + .map(CommandAction::from) + .collect(), + aggregated_output: None, + exit_code: None, + duration_ms: None, + } +} + +pub fn build_command_execution_begin_item(payload: &ExecCommandBeginEvent) -> ThreadItem { + ThreadItem::CommandExecution { + id: payload.call_id.clone(), + command: shlex_join(&payload.command), + cwd: payload.cwd.clone(), + process_id: payload.process_id.clone(), + source: payload.source.into(), + status: CommandExecutionStatus::InProgress, + command_actions: payload + .parsed_cmd + .iter() + .cloned() + .map(CommandAction::from) + .collect(), + aggregated_output: None, + exit_code: None, + duration_ms: None, + } +} + +pub fn build_command_execution_end_item(payload: &ExecCommandEndEvent) -> ThreadItem { + let aggregated_output = if payload.aggregated_output.is_empty() { + None + } else { + Some(payload.aggregated_output.clone()) + }; + let duration_ms = i64::try_from(payload.duration.as_millis()).unwrap_or(i64::MAX); + + ThreadItem::CommandExecution { + id: payload.call_id.clone(), + command: shlex_join(&payload.command), + cwd: payload.cwd.clone(), + process_id: payload.process_id.clone(), + source: payload.source.into(), + status: (&payload.status).into(), + command_actions: payload + .parsed_cmd + .iter() + .cloned() + .map(CommandAction::from) + .collect(), + aggregated_output, + exit_code: Some(payload.exit_code), + duration_ms: Some(duration_ms), + } +} + +/// Build a guardian-derived [`ThreadItem`]. 
+/// +/// Currently this only synthesizes [`ThreadItem::CommandExecution`] for +/// [`GuardianAssessmentAction::Command`] and [`GuardianAssessmentAction::Execve`]. +pub fn build_item_from_guardian_event( + assessment: &GuardianAssessmentEvent, + status: CommandExecutionStatus, +) -> Option { + match &assessment.action { + GuardianAssessmentAction::Command { command, cwd, .. } => { + let command = command.clone(); + let command_actions = vec![CommandAction::Unknown { + command: command.clone(), + }]; + Some(ThreadItem::CommandExecution { + id: assessment.id.clone(), + command, + cwd: cwd.clone(), + process_id: None, + source: CommandExecutionSource::Agent, + status, + command_actions, + aggregated_output: None, + exit_code: None, + duration_ms: None, + }) + } + GuardianAssessmentAction::Execve { + program, argv, cwd, .. + } => { + let argv = if argv.is_empty() { + vec![program.clone()] + } else { + std::iter::once(program.clone()) + .chain(argv.iter().skip(1).cloned()) + .collect::>() + }; + let command = shlex_join(&argv); + let parsed_cmd = parse_command(&argv); + let command_actions = if parsed_cmd.is_empty() { + vec![CommandAction::Unknown { + command: command.clone(), + }] + } else { + parsed_cmd.into_iter().map(CommandAction::from).collect() + }; + Some(ThreadItem::CommandExecution { + id: assessment.id.clone(), + command, + cwd: cwd.clone(), + process_id: None, + source: CommandExecutionSource::Agent, + status, + command_actions, + aggregated_output: None, + exit_code: None, + duration_ms: None, + }) + } + GuardianAssessmentAction::ApplyPatch { .. } + | GuardianAssessmentAction::NetworkAccess { .. } + | GuardianAssessmentAction::McpToolCall { .. 
} => None, + } +} + +pub fn guardian_auto_approval_review_notification( + conversation_id: &ThreadId, + event_turn_id: &str, + assessment: &GuardianAssessmentEvent, +) -> ServerNotification { + // TODO(ccunningham): Attach guardian review state to the reviewed tool + // item's lifecycle instead of sending standalone review notifications so + // the app-server API can persist and replay review state via `thread/read`. + let turn_id = if assessment.turn_id.is_empty() { + event_turn_id.to_string() + } else { + assessment.turn_id.clone() + }; + let review = GuardianApprovalReview { + status: match assessment.status { + codex_protocol::protocol::GuardianAssessmentStatus::InProgress => { + GuardianApprovalReviewStatus::InProgress + } + codex_protocol::protocol::GuardianAssessmentStatus::Approved => { + GuardianApprovalReviewStatus::Approved + } + codex_protocol::protocol::GuardianAssessmentStatus::Denied => { + GuardianApprovalReviewStatus::Denied + } + codex_protocol::protocol::GuardianAssessmentStatus::Aborted => { + GuardianApprovalReviewStatus::Aborted + } + }, + risk_score: assessment.risk_score, + risk_level: assessment.risk_level.map(Into::into), + rationale: assessment.rationale.clone(), + }; + let action = assessment.action.clone().into(); + match assessment.status { + codex_protocol::protocol::GuardianAssessmentStatus::InProgress => { + ServerNotification::ItemGuardianApprovalReviewStarted( + ItemGuardianApprovalReviewStartedNotification { + thread_id: conversation_id.to_string(), + turn_id, + target_item_id: assessment.id.clone(), + review, + action, + }, + ) + } + codex_protocol::protocol::GuardianAssessmentStatus::Approved + | codex_protocol::protocol::GuardianAssessmentStatus::Denied + | codex_protocol::protocol::GuardianAssessmentStatus::Aborted => { + ServerNotification::ItemGuardianApprovalReviewCompleted( + ItemGuardianApprovalReviewCompletedNotification { + thread_id: conversation_id.to_string(), + turn_id, + target_item_id: assessment.id.clone(), + 
review, + action, + }, + ) + } + } +} + +pub fn convert_patch_changes(changes: &HashMap) -> Vec { + let mut converted: Vec = changes + .iter() + .map(|(path, change)| FileUpdateChange { + path: path.to_string_lossy().into_owned(), + kind: map_patch_change_kind(change), + diff: format_file_change_diff(change), + }) + .collect(); + converted.sort_by(|a, b| a.path.cmp(&b.path)); + converted +} + +fn map_patch_change_kind(change: &FileChange) -> PatchChangeKind { + match change { + FileChange::Add { .. } => PatchChangeKind::Add, + FileChange::Delete { .. } => PatchChangeKind::Delete, + FileChange::Update { move_path, .. } => PatchChangeKind::Update { + move_path: move_path.clone(), + }, + } +} + +fn format_file_change_diff(change: &FileChange) -> String { + match change { + FileChange::Add { content } => content.clone(), + FileChange::Delete { content } => content.clone(), + FileChange::Update { + unified_diff, + move_path, + } => { + if let Some(path) = move_path { + format!("{unified_diff}\n\nMoved to: {}", path.display()) + } else { + unified_diff.clone() + } + } + } +} diff --git a/codex-rs/app-server-protocol/src/protocol/mod.rs b/codex-rs/app-server-protocol/src/protocol/mod.rs index 1e0410f4b9..4179d361c7 100644 --- a/codex-rs/app-server-protocol/src/protocol/mod.rs +++ b/codex-rs/app-server-protocol/src/protocol/mod.rs @@ -2,6 +2,7 @@ // Exposes protocol pieces used by `lib.rs` via `pub use protocol::common::*;`. 
pub mod common; +pub mod item_builders; mod mappers; mod serde_helpers; pub mod thread_history; diff --git a/codex-rs/app-server-protocol/src/protocol/thread_history.rs b/codex-rs/app-server-protocol/src/protocol/thread_history.rs index 48fa56d687..27d393629b 100644 --- a/codex-rs/app-server-protocol/src/protocol/thread_history.rs +++ b/codex-rs/app-server-protocol/src/protocol/thread_history.rs @@ -1,16 +1,18 @@ +use crate::protocol::item_builders::build_command_execution_begin_item; +use crate::protocol::item_builders::build_command_execution_end_item; +use crate::protocol::item_builders::build_file_change_approval_request_item; +use crate::protocol::item_builders::build_file_change_begin_item; +use crate::protocol::item_builders::build_file_change_end_item; +use crate::protocol::item_builders::build_item_from_guardian_event; use crate::protocol::v2::CollabAgentState; use crate::protocol::v2::CollabAgentTool; use crate::protocol::v2::CollabAgentToolCallStatus; -use crate::protocol::v2::CommandAction; use crate::protocol::v2::CommandExecutionStatus; use crate::protocol::v2::DynamicToolCallOutputContentItem; use crate::protocol::v2::DynamicToolCallStatus; -use crate::protocol::v2::FileUpdateChange; use crate::protocol::v2::McpToolCallError; use crate::protocol::v2::McpToolCallResult; use crate::protocol::v2::McpToolCallStatus; -use crate::protocol::v2::PatchApplyStatus; -use crate::protocol::v2::PatchChangeKind; use crate::protocol::v2::ThreadItem; use crate::protocol::v2::Turn; use crate::protocol::v2::TurnError as V2TurnError; @@ -31,6 +33,8 @@ use codex_protocol::protocol::ErrorEvent; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExecCommandBeginEvent; use codex_protocol::protocol::ExecCommandEndEvent; +use codex_protocol::protocol::GuardianAssessmentEvent; +use codex_protocol::protocol::GuardianAssessmentStatus; use codex_protocol::protocol::ImageGenerationBeginEvent; use codex_protocol::protocol::ImageGenerationEndEvent; use 
codex_protocol::protocol::ItemCompletedEvent; @@ -53,6 +57,14 @@ use std::collections::HashMap; use tracing::warn; use uuid::Uuid; +#[cfg(test)] +use crate::protocol::v2::CommandAction; +#[cfg(test)] +use crate::protocol::v2::FileUpdateChange; +#[cfg(test)] +use crate::protocol::v2::PatchApplyStatus; +#[cfg(test)] +use crate::protocol::v2::PatchChangeKind; #[cfg(test)] use codex_protocol::protocol::ExecCommandStatus as CoreExecCommandStatus; #[cfg(test)] @@ -149,6 +161,7 @@ impl ThreadHistoryBuilder { EventMsg::WebSearchEnd(payload) => self.handle_web_search_end(payload), EventMsg::ExecCommandBegin(payload) => self.handle_exec_command_begin(payload), EventMsg::ExecCommandEnd(payload) => self.handle_exec_command_end(payload), + EventMsg::GuardianAssessment(payload) => self.handle_guardian_assessment(payload), EventMsg::ApplyPatchApprovalRequest(payload) => { self.handle_apply_patch_approval_request(payload) } @@ -375,57 +388,12 @@ impl ThreadHistoryBuilder { } fn handle_exec_command_begin(&mut self, payload: &ExecCommandBeginEvent) { - let command = shlex::try_join(payload.command.iter().map(String::as_str)) - .unwrap_or_else(|_| payload.command.join(" ")); - let command_actions = payload - .parsed_cmd - .iter() - .cloned() - .map(CommandAction::from) - .collect(); - let item = ThreadItem::CommandExecution { - id: payload.call_id.clone(), - command, - cwd: payload.cwd.clone(), - process_id: payload.process_id.clone(), - source: payload.source.into(), - status: CommandExecutionStatus::InProgress, - command_actions, - aggregated_output: None, - exit_code: None, - duration_ms: None, - }; + let item = build_command_execution_begin_item(payload); self.upsert_item_in_turn_id(&payload.turn_id, item); } fn handle_exec_command_end(&mut self, payload: &ExecCommandEndEvent) { - let status: CommandExecutionStatus = (&payload.status).into(); - let duration_ms = i64::try_from(payload.duration.as_millis()).unwrap_or(i64::MAX); - let aggregated_output = if 
payload.aggregated_output.is_empty() { - None - } else { - Some(payload.aggregated_output.clone()) - }; - let command = shlex::try_join(payload.command.iter().map(String::as_str)) - .unwrap_or_else(|_| payload.command.join(" ")); - let command_actions = payload - .parsed_cmd - .iter() - .cloned() - .map(CommandAction::from) - .collect(); - let item = ThreadItem::CommandExecution { - id: payload.call_id.clone(), - command, - cwd: payload.cwd.clone(), - process_id: payload.process_id.clone(), - source: payload.source.into(), - status, - command_actions, - aggregated_output, - exit_code: Some(payload.exit_code), - duration_ms: Some(duration_ms), - }; + let item = build_command_execution_end_item(payload); // Command completions can arrive out of order. Unified exec may return // while a PTY is still running, then emit ExecCommandEnd later from a // background exit watcher when that process finally exits. By then, a @@ -434,12 +402,26 @@ impl ThreadHistoryBuilder { self.upsert_item_in_turn_id(&payload.turn_id, item); } - fn handle_apply_patch_approval_request(&mut self, payload: &ApplyPatchApprovalRequestEvent) { - let item = ThreadItem::FileChange { - id: payload.call_id.clone(), - changes: convert_patch_changes(&payload.changes), - status: PatchApplyStatus::InProgress, + fn handle_guardian_assessment(&mut self, payload: &GuardianAssessmentEvent) { + let status = match payload.status { + GuardianAssessmentStatus::InProgress => CommandExecutionStatus::InProgress, + GuardianAssessmentStatus::Denied | GuardianAssessmentStatus::Aborted => { + CommandExecutionStatus::Declined + } + GuardianAssessmentStatus::Approved => return, }; + let Some(item) = build_item_from_guardian_event(payload, status) else { + return; + }; + if payload.turn_id.is_empty() { + self.upsert_item_in_current_turn(item); + } else { + self.upsert_item_in_turn_id(&payload.turn_id, item); + } + } + + fn handle_apply_patch_approval_request(&mut self, payload: &ApplyPatchApprovalRequestEvent) { + let item = 
build_file_change_approval_request_item(payload); if payload.turn_id.is_empty() { self.upsert_item_in_current_turn(item); } else { @@ -448,11 +430,7 @@ impl ThreadHistoryBuilder { } fn handle_patch_apply_begin(&mut self, payload: &PatchApplyBeginEvent) { - let item = ThreadItem::FileChange { - id: payload.call_id.clone(), - changes: convert_patch_changes(&payload.changes), - status: PatchApplyStatus::InProgress, - }; + let item = build_file_change_begin_item(payload); if payload.turn_id.is_empty() { self.upsert_item_in_current_turn(item); } else { @@ -461,12 +439,7 @@ impl ThreadHistoryBuilder { } fn handle_patch_apply_end(&mut self, payload: &PatchApplyEndEvent) { - let status: PatchApplyStatus = (&payload.status).into(); - let item = ThreadItem::FileChange { - id: payload.call_id.clone(), - changes: convert_patch_changes(&payload.changes), - status, - }; + let item = build_file_change_end_item(payload); if payload.turn_id.is_empty() { self.upsert_item_in_current_turn(item); } else { @@ -547,6 +520,7 @@ impl ThreadHistoryBuilder { Some(McpToolCallResult { content: value.content.clone(), structured_content: value.structured_content.clone(), + meta: value.meta.clone(), }), None, ), @@ -891,22 +865,29 @@ impl ThreadHistoryBuilder { } fn handle_turn_aborted(&mut self, payload: &TurnAbortedEvent) { + let apply_abort = |turn: &mut PendingTurn| { + turn.status = TurnStatus::Interrupted; + turn.completed_at = payload.completed_at; + turn.duration_ms = payload.duration_ms; + }; if let Some(turn_id) = payload.turn_id.as_deref() { // Prefer an exact ID match so we interrupt the turn explicitly targeted by the event. 
if let Some(turn) = self.current_turn.as_mut().filter(|turn| turn.id == turn_id) { - turn.status = TurnStatus::Interrupted; + apply_abort(turn); return; } if let Some(turn) = self.turns.iter_mut().find(|turn| turn.id == turn_id) { turn.status = TurnStatus::Interrupted; + turn.completed_at = payload.completed_at; + turn.duration_ms = payload.duration_ms; return; } } // If the event has no ID (or refers to an unknown turn), fall back to the active turn. if let Some(turn) = self.current_turn.as_mut() { - turn.status = TurnStatus::Interrupted; + apply_abort(turn); } } @@ -915,15 +896,18 @@ impl ThreadHistoryBuilder { self.current_turn = Some( self.new_turn(Some(payload.turn_id.clone())) .with_status(TurnStatus::InProgress) + .with_started_at(payload.started_at) .opened_explicitly(), ); } fn handle_turn_complete(&mut self, payload: &TurnCompleteEvent) { - let mark_completed = |status: &mut TurnStatus| { - if matches!(*status, TurnStatus::Completed | TurnStatus::InProgress) { - *status = TurnStatus::Completed; + let mark_completed = |turn: &mut PendingTurn| { + if matches!(turn.status, TurnStatus::Completed | TurnStatus::InProgress) { + turn.status = TurnStatus::Completed; } + turn.completed_at = payload.completed_at; + turn.duration_ms = payload.duration_ms; }; // Prefer an exact ID match from the active turn and then close it. @@ -932,7 +916,7 @@ impl ThreadHistoryBuilder { .as_mut() .filter(|turn| turn.id == payload.turn_id) { - mark_completed(&mut current_turn.status); + mark_completed(current_turn); self.finish_current_turn(); return; } @@ -942,13 +926,17 @@ impl ThreadHistoryBuilder { .iter_mut() .find(|turn| turn.id == payload.turn_id) { - mark_completed(&mut turn.status); + if matches!(turn.status, TurnStatus::Completed | TurnStatus::InProgress) { + turn.status = TurnStatus::Completed; + } + turn.completed_at = payload.completed_at; + turn.duration_ms = payload.duration_ms; return; } // If the completion event cannot be matched, apply it to the active turn. 
if let Some(current_turn) = self.current_turn.as_mut() { - mark_completed(&mut current_turn.status); + mark_completed(current_turn); self.finish_current_turn(); } } @@ -981,7 +969,7 @@ impl ThreadHistoryBuilder { if turn.items.is_empty() && !turn.opened_explicitly && !turn.saw_compaction { return; } - self.turns.push(turn.into()); + self.turns.push(Turn::from(turn)); } } @@ -991,6 +979,9 @@ impl ThreadHistoryBuilder { items: Vec::new(), error: None, status: TurnStatus::Completed, + started_at: None, + completed_at: None, + duration_ms: None, opened_explicitly: false, saw_compaction: false, rollout_start_index: self.current_rollout_index, @@ -1076,21 +1067,6 @@ fn render_review_output_text(output: &ReviewOutputEvent) -> String { } } -pub fn convert_patch_changes( - changes: &HashMap, -) -> Vec { - let mut converted: Vec = changes - .iter() - .map(|(path, change)| FileUpdateChange { - path: path.to_string_lossy().into_owned(), - kind: map_patch_change_kind(change), - diff: format_file_change_diff(change), - }) - .collect(); - converted.sort_by(|a, b| a.path.cmp(&b.path)); - converted -} - fn convert_dynamic_tool_content_items( items: &[codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem], ) -> Vec { @@ -1108,33 +1084,6 @@ fn convert_dynamic_tool_content_items( .collect() } -fn map_patch_change_kind(change: &codex_protocol::protocol::FileChange) -> PatchChangeKind { - match change { - codex_protocol::protocol::FileChange::Add { .. } => PatchChangeKind::Add, - codex_protocol::protocol::FileChange::Delete { .. } => PatchChangeKind::Delete, - codex_protocol::protocol::FileChange::Update { move_path, .. 
} => PatchChangeKind::Update { - move_path: move_path.clone(), - }, - } -} - -fn format_file_change_diff(change: &codex_protocol::protocol::FileChange) -> String { - match change { - codex_protocol::protocol::FileChange::Add { content } => content.clone(), - codex_protocol::protocol::FileChange::Delete { content } => content.clone(), - codex_protocol::protocol::FileChange::Update { - unified_diff, - move_path, - } => { - if let Some(path) = move_path { - format!("{unified_diff}\n\nMoved to: {}", path.display()) - } else { - unified_diff.clone() - } - } - } -} - fn upsert_turn_item(items: &mut Vec, item: ThreadItem) { if let Some(existing_item) = items .iter_mut() @@ -1151,6 +1100,9 @@ struct PendingTurn { items: Vec, error: Option, status: TurnStatus, + started_at: Option, + completed_at: Option, + duration_ms: Option, /// True when this turn originated from an explicit `turn_started`/`turn_complete` /// boundary, so we preserve it even if it has no renderable items. opened_explicitly: bool, @@ -1171,6 +1123,11 @@ impl PendingTurn { self.status = status; self } + + fn with_started_at(mut self, started_at: Option) -> Self { + self.started_at = started_at; + self + } } impl From for Turn { @@ -1180,6 +1137,9 @@ impl From for Turn { items: value.items, error: value.error, status: value.status, + started_at: value.started_at, + completed_at: value.completed_at, + duration_ms: value.duration_ms, } } } @@ -1191,6 +1151,9 @@ impl From<&PendingTurn> for Turn { items: value.items.clone(), error: value.error.clone(), status: value.status.clone(), + started_at: value.started_at, + completed_at: value.completed_at, + duration_ms: value.duration_ms, } } } @@ -1205,6 +1168,7 @@ mod tests { use codex_protocol::items::TurnItem as CoreTurnItem; use codex_protocol::items::UserMessageItem as CoreUserMessageItem; use codex_protocol::items::build_hook_prompt_message; + use codex_protocol::mcp::CallToolResult; use codex_protocol::models::MessagePhase as CoreMessagePhase; use 
codex_protocol::models::WebSearchAction as CoreWebSearchAction; use codex_protocol::parse_command::ParsedCommand; @@ -1342,6 +1306,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_id.to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -1362,6 +1327,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: turn_id.to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -1414,6 +1381,7 @@ mod tests { let items = vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-image".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ -1433,6 +1401,8 @@ mod tests { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-image".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]; @@ -1444,6 +1414,9 @@ mod tests { id: "turn-image".into(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, items: vec![ ThreadItem::UserMessage { id: "item-1".into(), @@ -1533,6 +1506,8 @@ mod tests { EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some("turn-1".into()), reason: TurnAbortReason::Replaced, + completed_at: None, + duration_ms: None, }), EventMsg::UserMessage(UserMessageEvent { message: "Let's try again".into(), @@ -1730,6 +1705,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -1748,6 +1724,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -1784,6 +1762,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".into(), + started_at: None, 
model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -1884,11 +1863,73 @@ mod tests { ); } + #[test] + fn reconstructs_mcp_tool_result_meta_from_persisted_completion_events() { + let events = vec![ + EventMsg::TurnStarted(TurnStartedEvent { + turn_id: "turn-1".into(), + started_at: None, + model_context_window: None, + collaboration_mode_kind: Default::default(), + }), + EventMsg::McpToolCallEnd(McpToolCallEndEvent { + call_id: "mcp-1".into(), + invocation: McpInvocation { + server: "docs".into(), + tool: "lookup".into(), + arguments: Some(serde_json::json!({"id":"123"})), + }, + duration: Duration::from_millis(8), + result: Ok(CallToolResult { + content: vec![serde_json::json!({ + "type": "text", + "text": "result" + })], + structured_content: Some(serde_json::json!({"id":"123"})), + is_error: Some(false), + meta: Some(serde_json::json!({ + "ui/resourceUri": "ui://widget/lookup.html" + })), + }), + }), + ]; + + let items = events + .into_iter() + .map(RolloutItem::EventMsg) + .collect::>(); + let turns = build_turns_from_rollout_items(&items); + assert_eq!(turns.len(), 1); + assert_eq!( + turns[0].items[0], + ThreadItem::McpToolCall { + id: "mcp-1".into(), + server: "docs".into(), + tool: "lookup".into(), + status: McpToolCallStatus::Completed, + arguments: serde_json::json!({"id":"123"}), + result: Some(McpToolCallResult { + content: vec![serde_json::json!({ + "type": "text", + "text": "result" + })], + structured_content: Some(serde_json::json!({"id":"123"})), + meta: Some(serde_json::json!({ + "ui/resourceUri": "ui://widget/lookup.html" + })), + }), + error: None, + duration_ms: Some(8), + } + ); + } + #[test] fn reconstructs_dynamic_tool_items_from_request_and_response_events() { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -1948,6 +1989,7 @@ mod tests { let events = vec![ 
EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2030,11 +2072,144 @@ mod tests { ); } + #[test] + fn reconstructs_declined_guardian_command_item() { + let events = vec![ + EventMsg::TurnStarted(TurnStartedEvent { + turn_id: "turn-1".into(), + started_at: None, + model_context_window: None, + collaboration_mode_kind: Default::default(), + }), + EventMsg::UserMessage(UserMessageEvent { + message: "review this command".into(), + images: None, + text_elements: Vec::new(), + local_images: Vec::new(), + }), + EventMsg::GuardianAssessment(GuardianAssessmentEvent { + id: "guardian-exec".into(), + turn_id: "turn-1".into(), + status: GuardianAssessmentStatus::InProgress, + risk_score: None, + risk_level: None, + rationale: None, + action: serde_json::from_value(serde_json::json!({ + "type": "command", + "source": "shell", + "command": "rm -rf /tmp/guardian", + "cwd": "/tmp", + })) + .expect("guardian action"), + }), + EventMsg::GuardianAssessment(GuardianAssessmentEvent { + id: "guardian-exec".into(), + turn_id: "turn-1".into(), + status: GuardianAssessmentStatus::Denied, + risk_score: Some(97), + risk_level: Some(codex_protocol::protocol::GuardianRiskLevel::High), + rationale: Some("Would delete user data.".into()), + action: serde_json::from_value(serde_json::json!({ + "type": "command", + "source": "shell", + "command": "rm -rf /tmp/guardian", + "cwd": "/tmp", + })) + .expect("guardian action"), + }), + ]; + + let items = events + .into_iter() + .map(RolloutItem::EventMsg) + .collect::>(); + let turns = build_turns_from_rollout_items(&items); + assert_eq!(turns.len(), 1); + assert_eq!(turns[0].items.len(), 2); + assert_eq!( + turns[0].items[1], + ThreadItem::CommandExecution { + id: "guardian-exec".into(), + command: "rm -rf /tmp/guardian".into(), + cwd: PathBuf::from("/tmp"), + process_id: None, + source: CommandExecutionSource::Agent, + status: 
CommandExecutionStatus::Declined, + command_actions: vec![CommandAction::Unknown { + command: "rm -rf /tmp/guardian".into(), + }], + aggregated_output: None, + exit_code: None, + duration_ms: None, + } + ); + } + + #[test] + fn reconstructs_in_progress_guardian_execve_item() { + let events = vec![ + EventMsg::TurnStarted(TurnStartedEvent { + turn_id: "turn-1".into(), + started_at: None, + model_context_window: None, + collaboration_mode_kind: Default::default(), + }), + EventMsg::UserMessage(UserMessageEvent { + message: "run a subcommand".into(), + images: None, + text_elements: Vec::new(), + local_images: Vec::new(), + }), + EventMsg::GuardianAssessment(GuardianAssessmentEvent { + id: "guardian-execve".into(), + turn_id: "turn-1".into(), + status: GuardianAssessmentStatus::InProgress, + risk_score: None, + risk_level: None, + rationale: None, + action: serde_json::from_value(serde_json::json!({ + "type": "execve", + "source": "shell", + "program": "/bin/rm", + "argv": ["/usr/bin/rm", "-f", "/tmp/file.sqlite"], + "cwd": "/tmp", + })) + .expect("guardian action"), + }), + ]; + + let items = events + .into_iter() + .map(RolloutItem::EventMsg) + .collect::>(); + let turns = build_turns_from_rollout_items(&items); + assert_eq!(turns.len(), 1); + assert_eq!(turns[0].items.len(), 2); + assert_eq!( + turns[0].items[1], + ThreadItem::CommandExecution { + id: "guardian-execve".into(), + command: "/bin/rm -f /tmp/file.sqlite".into(), + cwd: PathBuf::from("/tmp"), + process_id: None, + source: CommandExecutionSource::Agent, + status: CommandExecutionStatus::InProgress, + command_actions: vec![CommandAction::Unknown { + command: "/bin/rm -f /tmp/file.sqlite".into(), + }], + aggregated_output: None, + exit_code: None, + duration_ms: None, + } + ); + } + #[test] fn assigns_late_exec_completion_to_original_turn() { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: 
Default::default(), }), @@ -2047,9 +2222,12 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-b".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2081,6 +2259,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-b".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -2118,6 +2298,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2130,9 +2311,12 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-b".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2164,6 +2348,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-b".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -2196,6 +2382,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_id.to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2259,6 +2446,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_id.to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2321,6 +2509,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2333,9 +2522,12 @@ mod tests { 
EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-b".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2348,6 +2540,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::AgentMessage(AgentMessageEvent { message: "still in b".into(), @@ -2357,6 +2551,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-b".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -2376,6 +2572,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2388,9 +2585,12 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-b".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2403,6 +2603,8 @@ mod tests { EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some("turn-a".into()), reason: TurnAbortReason::Replaced, + completed_at: None, + duration_ms: None, }), EventMsg::AgentMessage(AgentMessageEvent { message: "still in b".into(), @@ -2428,6 +2630,7 @@ mod tests { let items = vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-compact".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ -2438,6 +2641,8 @@ mod tests { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-compact".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, 
})), ]; @@ -2448,6 +2653,9 @@ mod tests { id: "turn-compact".into(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, items: Vec::new(), }] ); @@ -2665,6 +2873,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2677,6 +2886,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), EventMsg::Error(ErrorEvent { message: "request-level failure".into(), @@ -2696,6 +2907,9 @@ mod tests { id: "turn-a".into(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, items: vec![ThreadItem::UserMessage { id: "item-1".into(), content: vec![UserInput::Text { @@ -2712,6 +2926,7 @@ mod tests { let events = vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }), @@ -2730,6 +2945,8 @@ mod tests { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), ]; @@ -2765,6 +2982,7 @@ mod tests { let items = vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ -2778,6 +2996,8 @@ mod tests { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]; @@ -2808,6 +3028,7 @@ mod tests { let items = vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-a".into(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ 
-2823,6 +3044,8 @@ mod tests { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-a".into(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]; diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs index cdc78647a1..e0d22bfa24 100644 --- a/codex-rs/app-server-protocol/src/protocol/v2.rs +++ b/codex-rs/app-server-protocol/src/protocol/v2.rs @@ -29,6 +29,7 @@ use codex_protocol::config_types::WebSearchToolConfig; use codex_protocol::items::AgentMessageContent as CoreAgentMessageContent; use codex_protocol::items::TurnItem as CoreTurnItem; use codex_protocol::mcp::Resource as McpResource; +pub use codex_protocol::mcp::ResourceContent as McpResourceContent; use codex_protocol::mcp::ResourceTemplate as McpResourceTemplate; use codex_protocol::mcp::Tool as McpTool; use codex_protocol::memory_citation::MemoryCitation as CoreMemoryCitation; @@ -884,6 +885,7 @@ pub struct NetworkRequirements { /// Legacy compatibility view derived from `unix_sockets`. pub allow_unix_sockets: Option>, pub allow_local_binding: Option, + pub danger_full_access_denylist_only: Option, } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] @@ -1949,6 +1951,18 @@ pub struct ListMcpServerStatusParams { /// Optional page size; defaults to a server-defined value. #[ts(optional = nullable)] pub limit: Option, + /// Controls how much MCP inventory data to fetch for each server. + /// Defaults to `Full` when omitted. 
+ #[ts(optional = nullable)] + pub detail: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(rename_all = "camelCase", export_to = "v2/")] +pub enum McpServerStatusDetail { + Full, + ToolsAndAuthOnly, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] @@ -1972,6 +1986,22 @@ pub struct ListMcpServerStatusResponse { pub next_cursor: Option, } +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct McpResourceReadParams { + pub thread_id: String, + pub server: String, + pub uri: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct McpResourceReadResponse { + pub contents: Vec, +} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] @@ -2319,17 +2349,17 @@ pub struct FsCopyResponse {} #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct FsWatchParams { + /// Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`. + pub watch_id: String, /// Absolute file or directory path to watch. pub path: AbsolutePathBuf, } -/// Created watch handle returned by `fs/watch`. +/// Successful response for `fs/watch`. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct FsWatchResponse { - /// Connection-scoped watch identifier used for `fs/unwatch` and `fs/changed`. - pub watch_id: String, /// Canonicalized path associated with the watch. 
pub path: AbsolutePathBuf, } @@ -2339,7 +2369,7 @@ pub struct FsWatchResponse { #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct FsUnwatchParams { - /// Watch identifier returned by `fs/watch`. + /// Watch identifier previously provided to `fs/watch`. pub watch_id: String, } @@ -2354,7 +2384,7 @@ pub struct FsUnwatchResponse {} #[serde(rename_all = "camelCase")] #[ts(export_to = "v2/")] pub struct FsChangedNotification { - /// Watch identifier returned by `fs/watch`. + /// Watch identifier previously provided to `fs/watch`. pub watch_id: String, /// File or directory paths associated with this event. pub changed_paths: Vec, @@ -2573,10 +2603,22 @@ pub struct ThreadStartParams { pub config: Option>, #[ts(optional = nullable)] pub service_name: Option, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", + serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub base_instructions: Option, + pub base_instructions: Option>, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", + serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub developer_instructions: Option, + pub developer_instructions: Option>, #[ts(optional = nullable)] pub personality: Option, #[ts(optional = nullable)] @@ -2691,10 +2733,22 @@ pub struct ThreadResumeParams { pub sandbox: Option, #[ts(optional = nullable)] pub config: Option>, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", + serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub base_instructions: Option, + pub base_instructions: Option>, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", 
+ serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub developer_instructions: Option, + pub developer_instructions: Option>, #[ts(optional = nullable)] pub personality: Option, /// If true, persist additional rollout EventMsg variants required to @@ -2768,10 +2822,22 @@ pub struct ThreadForkParams { pub sandbox: Option, #[ts(optional = nullable)] pub config: Option>, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", + serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub base_instructions: Option, + pub base_instructions: Option>, + #[serde( + default, + deserialize_with = "super::serde_helpers::deserialize_double_option", + serialize_with = "super::serde_helpers::serialize_double_option", + skip_serializing_if = "Option::is_none" + )] #[ts(optional = nullable)] - pub developer_instructions: Option, + pub developer_instructions: Option>, #[serde(default, skip_serializing_if = "std::ops::Not::not")] pub ephemeral: bool, /// If true, persist additional rollout EventMsg variants required to @@ -3693,6 +3759,15 @@ pub struct Turn { pub status: TurnStatus, /// Only populated when the Turn's status is failed. pub error: Option, + /// Unix timestamp (in seconds) when the turn started. + #[ts(type = "number | null")] + pub started_at: Option, + /// Unix timestamp (in seconds) when the turn completed. + #[ts(type = "number | null")] + pub completed_at: Option, + /// Duration between turn start and completion in milliseconds, if known. + #[ts(type = "number | null")] + pub duration_ms: Option, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] @@ -4973,6 +5048,9 @@ pub struct McpToolCallResult { // representations). Using `JsonValue` keeps the payload wire-shaped and easy to export. 
pub content: Vec, pub structured_content: Option, + #[serde(rename = "_meta")] + #[ts(rename = "_meta")] + pub meta: Option, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] @@ -7782,6 +7860,7 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, + danger_full_access_denylist_only: None, allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["blocked.example.com".to_string()]), unix_sockets: None, @@ -7808,6 +7887,7 @@ mod tests { ), ])), managed_allowed_domains_only: Some(true), + danger_full_access_denylist_only: Some(true), allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["blocked.example.com".to_string()]), unix_sockets: Some(BTreeMap::from([ @@ -7838,6 +7918,7 @@ mod tests { "blocked.example.com": "deny" }, "managedAllowedDomainsOnly": true, + "dangerFullAccessDenylistOnly": true, "allowedDomains": ["api.openai.com"], "deniedDomains": ["blocked.example.com"], "unixSockets": { @@ -8283,6 +8364,35 @@ mod tests { assert_eq!(serialized_without_override.get("serviceTier"), None); } + #[test] + fn thread_start_params_preserve_explicit_null_instructions() { + let params: ThreadStartParams = serde_json::from_value(json!({ + "baseInstructions": null, + "developerInstructions": null, + })) + .expect("params should deserialize"); + assert_eq!(params.base_instructions, Some(None)); + assert_eq!(params.developer_instructions, Some(None)); + + let serialized = serde_json::to_value(¶ms).expect("params should serialize"); + assert_eq!( + serialized.get("baseInstructions"), + Some(&serde_json::Value::Null) + ); + assert_eq!( + serialized.get("developerInstructions"), + Some(&serde_json::Value::Null) + ); + + let serialized_without_override = + serde_json::to_value(ThreadStartParams::default()).expect("params should serialize"); + assert_eq!(serialized_without_override.get("baseInstructions"), None); + assert_eq!( + 
serialized_without_override.get("developerInstructions"), + None + ); + } + #[test] fn turn_start_params_preserve_explicit_null_service_tier() { let params: TurnStartParams = serde_json::from_value(json!({ diff --git a/codex-rs/app-server/Cargo.toml b/codex-rs/app-server/Cargo.toml index 4db6b81a21..5dc3a31485 100644 --- a/codex-rs/app-server/Cargo.toml +++ b/codex-rs/app-server/Cargo.toml @@ -57,10 +57,12 @@ codex-state = { workspace = true } codex-tools = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-json-to-toml = { workspace = true } +codex-utils-rustls-provider = { workspace = true } chrono = { workspace = true } clap = { workspace = true, features = ["derive"] } constant_time_eq = { workspace = true } futures = { workspace = true } +gethostname = { workspace = true } hmac = { workspace = true } jsonwebtoken = { workspace = true } owo-colors = { workspace = true, features = ["supports-colors"] } @@ -81,6 +83,7 @@ tokio-util = { workspace = true } tokio-tungstenite = { workspace = true } tracing = { workspace = true, features = ["log"] } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "json"] } +url = { workspace = true } uuid = { workspace = true, features = ["serde", "v7"] } [dev-dependencies] diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index c24c3fd7e8..3aff647766 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -25,6 +25,7 @@ Supported transports: - stdio (`--listen stdio://`, default): newline-delimited JSON (JSONL) - websocket (`--listen ws://IP:PORT`): one JSON-RPC message per websocket text frame (**experimental / unsupported**) +- off (`--listen off`): do not expose a local transport When running with `--listen ws://IP:PORT`, the same listener also serves basic HTTP health probes: @@ -168,7 +169,7 @@ Example with notification opt-out: - `fs/readDirectory` — list direct child entries for an absolute directory path; each entry 
contains `fileName`, `isDirectory`, and `isFile`, and `fileName` is just the child name, not a path. - `fs/remove` — remove an absolute file or directory tree; `recursive` and `force` default to `true`. - `fs/copy` — copy between absolute paths; directory copies require `recursive: true`. -- `fs/watch` — subscribe this connection to filesystem change notifications for an absolute file or directory path; returns a `watchId` and canonicalized `path`. +- `fs/watch` — subscribe this connection to filesystem change notifications for an absolute file or directory path and caller-provided `watchId`; returns the canonicalized `path`. - `fs/unwatch` — stop sending notifications for a prior `fs/watch`; returns `{}`. - `fs/changed` — notification emitted when watched paths change, including the `watchId` and `changedPaths`. - `model/list` — list available models (set `includeHidden: true` to include entries with `hidden: true`), with reasoning effort options, optional legacy `upgrade` model ids, optional `upgradeInfo` metadata (`model`, `upgradeCopy`, `modelLink`, `migrationMarkdown`), and optional `availabilityNux` metadata. @@ -186,7 +187,8 @@ Example with notification opt-out: - `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes. - `tool/requestUserInput` — prompt the user with 1–3 short questions for a tool call and return their answers (experimental). - `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server. -- `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination. 
+- `mcpServerStatus/list` — enumerate configured MCP servers with their tools and auth status, plus resources/resource templates for `full` detail; supports cursor+limit pagination. If `detail` is omitted, the server defaults to `full`. +- `mcpServer/resource/read` — read a resource from a thread's configured MCP server by `threadId`, `server`, and `uri`, returning text/blob resource `contents`. - `windowsSandbox/setupStart` — start Windows sandbox setup for the selected mode (`elevated` or `unelevated`); accepts an optional absolute `cwd` to target setup for a specific workspace, returns `{ started: true }` immediately, and later emits `windowsSandbox/setupCompleted`. - `feedback/upload` — submit a feedback report (classification + optional reason/logs, conversation_id, and optional `extraLogFiles` attachments array); returns the tracking thread id. - `config/read` — fetch the effective config on disk after resolving config layering. @@ -194,7 +196,7 @@ Example with notification opt-out: - `externalAgentConfig/import` — apply selected external-agent migration items by passing explicit `migrationItems` with `cwd` (`null` for home). - `config/value/write` — write a single config key/value to the user's config.toml on disk. - `config/batchWrite` — apply multiple config edits atomically to the user's config.toml on disk, with optional `reloadUserConfig: true` to hot-reload loaded threads. -- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), pinned feature values (`featureRequirements`), `enforceResidency`, and `network` constraints such as canonical domain/socket permissions plus `managedAllowedDomainsOnly`. 
+- `configRequirements/read` — fetch loaded requirements constraints from `requirements.toml` and/or MDM (or `null` if none are configured), including allow-lists (`allowedApprovalPolicies`, `allowedSandboxModes`, `allowedWebSearchModes`), pinned feature values (`featureRequirements`), `enforceResidency`, and `network` constraints such as canonical domain/socket permissions plus `managedAllowedDomainsOnly` and `dangerFullAccessDenylistOnly`. ### Example: Start or resume a thread @@ -814,10 +816,10 @@ All filesystem paths in this section must be absolute. ```json { "method": "fs/watch", "id": 44, "params": { + "watchId": "0195ec6b-1d6f-7c2e-8c7a-56f2c4a8b9d1", "path": "/Users/me/project/.git/HEAD" } } { "id": 44, "result": { - "watchId": "0195ec6b-1d6f-7c2e-8c7a-56f2c4a8b9d1", "path": "/Users/me/project/.git/HEAD" } } { "method": "fs/changed", "params": { diff --git a/codex-rs/app-server/src/app_server_tracing.rs b/codex-rs/app-server/src/app_server_tracing.rs index 26fe8ca999..b06a8e52c4 100644 --- a/codex-rs/app-server/src/app_server_tracing.rs +++ b/codex-rs/app-server/src/app_server_tracing.rs @@ -86,6 +86,7 @@ fn transport_name(transport: AppServerTransport) -> &'static str { match transport { AppServerTransport::Stdio => "stdio", AppServerTransport::WebSocket { .. 
} => "websocket", + AppServerTransport::Off => "off", } } diff --git a/codex-rs/app-server/src/bespoke_event_handling.rs b/codex-rs/app-server/src/bespoke_event_handling.rs index 78c60b0aee..f5c60fe9dc 100644 --- a/codex-rs/app-server/src/bespoke_event_handling.rs +++ b/codex-rs/app-server/src/bespoke_event_handling.rs @@ -7,9 +7,9 @@ use crate::error_code::INVALID_REQUEST_ERROR_CODE; use crate::outgoing_message::ClientRequestResult; use crate::outgoing_message::ThreadScopedOutgoingMessageSender; use crate::server_request_error::is_turn_transition_server_request_error; -use crate::thread_state::ThreadListenerCommand; use crate::thread_state::ThreadState; use crate::thread_state::TurnSummary; +use crate::thread_state::resolve_server_request_on_thread_listener; use crate::thread_status::ThreadWatchActiveGuard; use crate::thread_status::ThreadWatchManager; use codex_app_server_protocol::AccountRateLimitsUpdatedNotification; @@ -43,14 +43,10 @@ use codex_app_server_protocol::FileChangeRequestApprovalParams; use codex_app_server_protocol::FileChangeRequestApprovalResponse; use codex_app_server_protocol::FileUpdateChange; use codex_app_server_protocol::GrantedPermissionProfile as V2GrantedPermissionProfile; -use codex_app_server_protocol::GuardianApprovalReview; -use codex_app_server_protocol::GuardianApprovalReviewStatus; use codex_app_server_protocol::HookCompletedNotification; use codex_app_server_protocol::HookStartedNotification; use codex_app_server_protocol::InterruptConversationResponse; use codex_app_server_protocol::ItemCompletedNotification; -use codex_app_server_protocol::ItemGuardianApprovalReviewCompletedNotification; -use codex_app_server_protocol::ItemGuardianApprovalReviewStartedNotification; use codex_app_server_protocol::ItemStartedNotification; use codex_app_server_protocol::JSONRPCErrorError; use codex_app_server_protocol::McpServerElicitationAction; @@ -102,8 +98,14 @@ use codex_app_server_protocol::TurnPlanStep; use 
codex_app_server_protocol::TurnPlanUpdatedNotification; use codex_app_server_protocol::TurnStartedNotification; use codex_app_server_protocol::TurnStatus; +use codex_app_server_protocol::build_command_execution_end_item; +use codex_app_server_protocol::build_file_change_approval_request_item; +use codex_app_server_protocol::build_file_change_begin_item; +use codex_app_server_protocol::build_file_change_end_item; +use codex_app_server_protocol::build_item_from_guardian_event; use codex_app_server_protocol::build_turns_from_rollout_items; use codex_app_server_protocol::convert_patch_changes; +use codex_app_server_protocol::guardian_auto_approval_review_notification; use codex_core::CodexThread; use codex_core::ThreadManager; use codex_core::find_thread_name_by_id; @@ -114,13 +116,10 @@ use codex_protocol::dynamic_tools::DynamicToolCallOutputContentItem as CoreDynam use codex_protocol::dynamic_tools::DynamicToolResponse as CoreDynamicToolResponse; use codex_protocol::items::parse_hook_prompt_message; use codex_protocol::plan_tool::UpdatePlanArgs; -use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; use codex_protocol::protocol::CodexErrorInfo as CoreCodexErrorInfo; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExecApprovalRequestEvent; -use codex_protocol::protocol::ExecCommandEndEvent; -use codex_protocol::protocol::GuardianAssessmentEvent; use codex_protocol::protocol::McpToolCallBeginEvent; use codex_protocol::protocol::McpToolCallEndEvent; use codex_protocol::protocol::Op; @@ -128,6 +127,8 @@ use codex_protocol::protocol::RealtimeEvent; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::ReviewOutputEvent; use codex_protocol::protocol::TokenCountEvent; +use codex_protocol::protocol::TurnAbortedEvent; +use codex_protocol::protocol::TurnCompleteEvent; use codex_protocol::protocol::TurnDiffEvent; use codex_protocol::request_permissions::PermissionGrantScope as 
CorePermissionGrantScope; use codex_protocol::request_permissions::RequestPermissionProfile as CoreRequestPermissionProfile; @@ -137,7 +138,6 @@ use codex_protocol::request_user_input::RequestUserInputResponse as CoreRequestU use codex_sandboxing::policy_transforms::intersect_permission_profiles; use codex_shell_command::parse_command::shlex_join; use std::collections::HashMap; -use std::convert::TryFrom; use std::path::Path; use std::path::PathBuf; use std::sync::Arc; @@ -153,105 +153,13 @@ enum CommandExecutionApprovalPresentation { Command(CommandExecutionCompletionItem), } +#[derive(Debug, PartialEq)] struct CommandExecutionCompletionItem { command: String, cwd: PathBuf, command_actions: Vec, } -async fn resolve_server_request_on_thread_listener( - thread_state: &Arc>, - request_id: RequestId, -) { - let (completion_tx, completion_rx) = oneshot::channel(); - let listener_command_tx = { - let state = thread_state.lock().await; - state.listener_command_tx() - }; - let Some(listener_command_tx) = listener_command_tx else { - error!("failed to remove pending client request: thread listener is not running"); - return; - }; - - if listener_command_tx - .send(ThreadListenerCommand::ResolveServerRequest { - request_id, - completion_tx, - }) - .is_err() - { - error!( - "failed to remove pending client request: thread listener command channel is closed" - ); - return; - } - - if let Err(err) = completion_rx.await { - error!("failed to remove pending client request: {err}"); - } -} - -fn guardian_auto_approval_review_notification( - conversation_id: &ThreadId, - event_turn_id: &str, - assessment: &GuardianAssessmentEvent, -) -> ServerNotification { - // TODO(ccunningham): Attach guardian review state to the reviewed tool - // item's lifecycle instead of sending standalone review notifications so - // the app-server API can persist and replay review state via `thread/read`. 
- let turn_id = if assessment.turn_id.is_empty() { - event_turn_id.to_string() - } else { - assessment.turn_id.clone() - }; - let review = GuardianApprovalReview { - status: match assessment.status { - codex_protocol::protocol::GuardianAssessmentStatus::InProgress => { - GuardianApprovalReviewStatus::InProgress - } - codex_protocol::protocol::GuardianAssessmentStatus::Approved => { - GuardianApprovalReviewStatus::Approved - } - codex_protocol::protocol::GuardianAssessmentStatus::Denied => { - GuardianApprovalReviewStatus::Denied - } - codex_protocol::protocol::GuardianAssessmentStatus::Aborted => { - GuardianApprovalReviewStatus::Aborted - } - }, - risk_score: assessment.risk_score, - risk_level: assessment.risk_level.map(Into::into), - rationale: assessment.rationale.clone(), - }; - let action = assessment.action.clone().into(); - match assessment.status { - codex_protocol::protocol::GuardianAssessmentStatus::InProgress => { - ServerNotification::ItemGuardianApprovalReviewStarted( - ItemGuardianApprovalReviewStartedNotification { - thread_id: conversation_id.to_string(), - turn_id, - target_item_id: assessment.id.clone(), - review, - action, - }, - ) - } - codex_protocol::protocol::GuardianAssessmentStatus::Approved - | codex_protocol::protocol::GuardianAssessmentStatus::Denied - | codex_protocol::protocol::GuardianAssessmentStatus::Aborted => { - ServerNotification::ItemGuardianApprovalReviewCompleted( - ItemGuardianApprovalReviewCompletedNotification { - thread_id: conversation_id.to_string(), - turn_id, - target_item_id: assessment.id.clone(), - review, - action, - }, - ) - } - } -} - #[allow(clippy::too_many_arguments)] pub(crate) async fn apply_bespoke_event_handling( event: Event, @@ -284,6 +192,9 @@ pub(crate) async fn apply_bespoke_event_handling( items: Vec::new(), error: None, status: TurnStatus::InProgress, + started_at: payload.started_at, + completed_at: None, + duration_ms: None, }) }; let notification = TurnStartedNotification { @@ -295,14 +206,21 
@@ pub(crate) async fn apply_bespoke_event_handling( .await; } } - EventMsg::TurnComplete(_ev) => { + EventMsg::TurnComplete(turn_complete_event) => { // All per-thread requests are bound to a turn, so abort them. outgoing.abort_pending_server_requests().await; let turn_failed = thread_state.lock().await.turn_summary.last_error.is_some(); thread_watch_manager .note_turn_completed(&conversation_id.to_string(), turn_failed) .await; - handle_turn_complete(conversation_id, event_turn_id, &outgoing, &thread_state).await; + handle_turn_complete( + conversation_id, + event_turn_id, + turn_complete_event, + &outgoing, + &thread_state, + ) + .await; } EventMsg::SkillsUpdateAvailable => { if let ApiVersion::V2 = api_version { @@ -344,12 +262,71 @@ pub(crate) async fn apply_bespoke_event_handling( EventMsg::Warning(_warning_event) => {} EventMsg::GuardianAssessment(assessment) => { if let ApiVersion::V2 = api_version { + let pending_command_execution = match build_item_from_guardian_event( + &assessment, + CommandExecutionStatus::InProgress, + ) { + Some(ThreadItem::CommandExecution { + command, + cwd, + command_actions, + .. 
+ }) => Some(CommandExecutionCompletionItem { + command, + cwd, + command_actions, + }), + Some(_) | None => None, + }; + let assessment_turn_id = if assessment.turn_id.is_empty() { + event_turn_id.clone() + } else { + assessment.turn_id.clone() + }; + if assessment.status + == codex_protocol::protocol::GuardianAssessmentStatus::InProgress + && let Some(completion_item) = pending_command_execution.as_ref() + { + start_command_execution_item( + &conversation_id, + assessment_turn_id.clone(), + assessment.id.clone(), + completion_item.command.clone(), + completion_item.cwd.clone(), + completion_item.command_actions.clone(), + CommandExecutionSource::Agent, + &outgoing, + &thread_state, + ) + .await; + } let notification = guardian_auto_approval_review_notification( &conversation_id, &event_turn_id, &assessment, ); outgoing.send_server_notification(notification).await; + if matches!( + assessment.status, + codex_protocol::protocol::GuardianAssessmentStatus::Denied + | codex_protocol::protocol::GuardianAssessmentStatus::Aborted + ) && let Some(completion_item) = pending_command_execution + { + complete_command_execution_item( + &conversation_id, + assessment_turn_id, + assessment.id.clone(), + completion_item.command, + completion_item.cwd, + /*process_id*/ None, + CommandExecutionSource::Agent, + completion_item.command_actions, + CommandExecutionStatus::Declined, + &outgoing, + &thread_state, + ) + .await; + } } } EventMsg::ModelReroute(event) => { @@ -503,13 +480,7 @@ pub(crate) async fn apply_bespoke_event_handling( .await; } } - EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent { - call_id, - turn_id, - changes, - reason, - grant_root, - }) => { + EventMsg::ApplyPatchApprovalRequest(event) => { let permission_guard = thread_watch_manager .note_permission_requested(&conversation_id.to_string()) .await; @@ -517,14 +488,15 @@ pub(crate) async fn apply_bespoke_event_handling( ApiVersion::V1 => { let params = ApplyPatchApprovalParams { conversation_id, 
- call_id: call_id.clone(), - file_changes: changes.clone(), - reason, - grant_root, + call_id: event.call_id.clone(), + file_changes: event.changes.clone(), + reason: event.reason.clone(), + grant_root: event.grant_root.clone(), }; let (_pending_request_id, rx) = outgoing .send_request(ServerRequestPayload::ApplyPatchApproval(params)) .await; + let call_id = event.call_id.clone(); tokio::spawn(async move { let _permission_guard = permission_guard; on_patch_approval_response(call_id, rx, conversation).await; @@ -533,9 +505,8 @@ pub(crate) async fn apply_bespoke_event_handling( ApiVersion::V2 => { // Until we migrate the core to be aware of a first class FileChangeItem // and emit the corresponding EventMsg, we repurpose the call_id as the item_id. - let item_id = call_id.clone(); - let patch_changes = convert_patch_changes(&changes); - + let item_id = event.call_id.clone(); + let patch_changes = convert_patch_changes(&event.changes); let first_start = { let mut state = thread_state.lock().await; state @@ -544,11 +515,7 @@ pub(crate) async fn apply_bespoke_event_handling( .insert(item_id.clone()) }; if first_start { - let item = ThreadItem::FileChange { - id: item_id.clone(), - changes: patch_changes.clone(), - status: PatchApplyStatus::InProgress, - }; + let item = build_file_change_approval_request_item(&event); let notification = ItemStartedNotification { thread_id: conversation_id.to_string(), turn_id: event_turn_id.clone(), @@ -561,10 +528,10 @@ pub(crate) async fn apply_bespoke_event_handling( let params = FileChangeRequestApprovalParams { thread_id: conversation_id.to_string(), - turn_id: turn_id.clone(), + turn_id: event.turn_id.clone(), item_id: item_id.clone(), - reason, - grant_root, + reason: event.reason.clone(), + grant_root: event.grant_root.clone(), }; let (pending_request_id, rx) = outgoing .send_request(ServerRequestPayload::FileChangeRequestApproval(params)) @@ -668,6 +635,22 @@ pub(crate) async fn apply_bespoke_event_handling( 
Some(completion_item), ), }; + if approval_id.is_none() + && let Some(completion_item) = completion_item.as_ref() + { + start_command_execution_item( + &conversation_id, + event_turn_id.clone(), + call_id.clone(), + completion_item.command.clone(), + completion_item.cwd.clone(), + completion_item.command_actions.clone(), + CommandExecutionSource::Agent, + &outgoing, + &thread_state, + ) + .await; + } let proposed_execpolicy_amendment_v2 = proposed_execpolicy_amendment.map(V2ExecPolicyAmendment::from); let proposed_network_policy_amendments_v2 = proposed_network_policy_amendments @@ -1555,7 +1538,6 @@ pub(crate) async fn apply_bespoke_event_handling( // Until we migrate the core to be aware of a first class FileChangeItem // and emit the corresponding EventMsg, we repurpose the call_id as the item_id. let item_id = patch_begin_event.call_id.clone(); - let changes = convert_patch_changes(&patch_begin_event.changes); let first_start = { let mut state = thread_state.lock().await; @@ -1565,11 +1547,7 @@ pub(crate) async fn apply_bespoke_event_handling( .insert(item_id.clone()) }; if first_start { - let item = ThreadItem::FileChange { - id: item_id.clone(), - changes, - status: PatchApplyStatus::InProgress, - }; + let item = build_file_change_begin_item(&patch_begin_event); let notification = ItemStartedNotification { thread_id: conversation_id.to_string(), turn_id: event_turn_id.clone(), @@ -1584,14 +1562,10 @@ pub(crate) async fn apply_bespoke_event_handling( // Until we migrate the core to be aware of a first class FileChangeItem // and emit the corresponding EventMsg, we repurpose the call_id as the item_id. 
let item_id = patch_end_event.call_id.clone(); - - let status: PatchApplyStatus = (&patch_end_event.status).into(); - let changes = convert_patch_changes(&patch_end_event.changes); complete_file_change_item( conversation_id, item_id, - changes, - status, + build_file_change_end_item(&patch_end_event), event_turn_id.clone(), &outgoing, &thread_state, @@ -1608,35 +1582,35 @@ pub(crate) async fn apply_bespoke_event_handling( let command = shlex_join(&exec_command_begin_event.command); let cwd = exec_command_begin_event.cwd; let process_id = exec_command_begin_event.process_id; - - { + let first_start = { let mut state = thread_state.lock().await; state .turn_summary .command_execution_started - .insert(item_id.clone()); + .insert(item_id.clone()) + }; + if first_start { + let item = ThreadItem::CommandExecution { + id: item_id, + command, + cwd, + process_id, + source: exec_command_begin_event.source.into(), + status: CommandExecutionStatus::InProgress, + command_actions, + aggregated_output: None, + exit_code: None, + duration_ms: None, + }; + let notification = ItemStartedNotification { + thread_id: conversation_id.to_string(), + turn_id: event_turn_id.clone(), + item, + }; + outgoing + .send_server_notification(ServerNotification::ItemStarted(notification)) + .await; } - - let item = ThreadItem::CommandExecution { - id: item_id, - command, - cwd, - process_id, - source: exec_command_begin_event.source.into(), - status: CommandExecutionStatus::InProgress, - command_actions, - aggregated_output: None, - exit_code: None, - duration_ms: None, - }; - let notification = ItemStartedNotification { - thread_id: conversation_id.to_string(), - turn_id: event_turn_id.clone(), - item, - }; - outgoing - .send_server_notification(ServerNotification::ItemStarted(notification)) - .await; } EventMsg::ExecCommandOutputDelta(exec_command_output_delta_event) => { let item_id = exec_command_output_delta_event.call_id.clone(); @@ -1694,20 +1668,7 @@ pub(crate) async fn 
apply_bespoke_event_handling( .await; } EventMsg::ExecCommandEnd(exec_command_end_event) => { - let ExecCommandEndEvent { - call_id, - command, - cwd, - parsed_cmd, - process_id, - aggregated_output, - exit_code, - duration, - source, - status, - .. - } = exec_command_end_event; - + let call_id = exec_command_end_event.call_id.clone(); { let mut state = thread_state.lock().await; state @@ -1716,32 +1677,7 @@ pub(crate) async fn apply_bespoke_event_handling( .remove(&call_id); } - let status: CommandExecutionStatus = (&status).into(); - let command_actions = parsed_cmd - .into_iter() - .map(V2ParsedCommand::from) - .collect::>(); - - let aggregated_output = if aggregated_output.is_empty() { - None - } else { - Some(aggregated_output) - }; - - let duration_ms = i64::try_from(duration.as_millis()).unwrap_or(i64::MAX); - - let item = ThreadItem::CommandExecution { - id: call_id, - command: shlex_join(&command), - cwd, - process_id, - source: source.into(), - status, - command_actions, - aggregated_output, - exit_code: Some(exit_code), - duration_ms: Some(duration_ms), - }; + let item = build_command_execution_end_item(&exec_command_end_event); let notification = ItemCompletedNotification { thread_id: conversation_id.to_string(), @@ -1780,7 +1716,14 @@ pub(crate) async fn apply_bespoke_event_handling( thread_watch_manager .note_turn_interrupted(&conversation_id.to_string()) .await; - handle_turn_interrupted(conversation_id, event_turn_id, &outgoing, &thread_state).await; + handle_turn_interrupted( + conversation_id, + event_turn_id, + turn_aborted_event, + &outgoing, + &thread_state, + ) + .await; } EventMsg::ThreadRolledBack(_rollback_event) => { let pending = { @@ -1942,11 +1885,18 @@ async fn handle_turn_plan_update( } } +struct TurnCompletionMetadata { + status: TurnStatus, + error: Option, + started_at: Option, + completed_at: Option, + duration_ms: Option, +} + async fn emit_turn_completed_with_status( conversation_id: ThreadId, event_turn_id: String, - status: 
TurnStatus, - error: Option, + turn_completion_metadata: TurnCompletionMetadata, outgoing: &ThreadScopedOutgoingMessageSender, ) { let notification = TurnCompletedNotification { @@ -1954,8 +1904,11 @@ async fn emit_turn_completed_with_status( turn: Turn { id: event_turn_id, items: vec![], - error, - status, + error: turn_completion_metadata.error, + status: turn_completion_metadata.status, + started_at: turn_completion_metadata.started_at, + completed_at: turn_completion_metadata.completed_at, + duration_ms: turn_completion_metadata.duration_ms, }, }; outgoing @@ -1966,8 +1919,7 @@ async fn emit_turn_completed_with_status( async fn complete_file_change_item( conversation_id: ThreadId, item_id: String, - changes: Vec, - status: PatchApplyStatus, + item: ThreadItem, turn_id: String, outgoing: &ThreadScopedOutgoingMessageSender, thread_state: &Arc>, @@ -1976,11 +1928,6 @@ async fn complete_file_change_item( state.turn_summary.file_change_started.remove(&item_id); drop(state); - let item = ThreadItem::FileChange { - id: item_id, - changes, - status, - }; let notification = ItemCompletedNotification { thread_id: conversation_id.to_string(), turn_id, @@ -1991,9 +1938,52 @@ async fn complete_file_change_item( .await; } +#[allow(clippy::too_many_arguments)] +async fn start_command_execution_item( + conversation_id: &ThreadId, + turn_id: String, + item_id: String, + command: String, + cwd: PathBuf, + command_actions: Vec, + source: CommandExecutionSource, + outgoing: &ThreadScopedOutgoingMessageSender, + thread_state: &Arc>, +) -> bool { + let first_start = { + let mut state = thread_state.lock().await; + state + .turn_summary + .command_execution_started + .insert(item_id.clone()) + }; + if first_start { + let notification = ItemStartedNotification { + thread_id: conversation_id.to_string(), + turn_id, + item: ThreadItem::CommandExecution { + id: item_id, + command, + cwd, + process_id: None, + source, + status: CommandExecutionStatus::InProgress, + command_actions, + 
aggregated_output: None, + exit_code: None, + duration_ms: None, + }, + }; + outgoing + .send_server_notification(ServerNotification::ItemStarted(notification)) + .await; + } + first_start +} + #[allow(clippy::too_many_arguments)] async fn complete_command_execution_item( - conversation_id: ThreadId, + conversation_id: &ThreadId, turn_id: String, item_id: String, command: String, @@ -2003,7 +1993,18 @@ async fn complete_command_execution_item( command_actions: Vec, status: CommandExecutionStatus, outgoing: &ThreadScopedOutgoingMessageSender, + thread_state: &Arc>, ) { + let mut state = thread_state.lock().await; + let should_emit = state + .turn_summary + .command_execution_started + .remove(&item_id); + drop(state); + if !should_emit { + return; + } + let item = ThreadItem::CommandExecution { id: item_id, command, @@ -2101,6 +2102,7 @@ async fn find_and_remove_turn_summary( async fn handle_turn_complete( conversation_id: ThreadId, event_turn_id: String, + turn_complete_event: TurnCompleteEvent, outgoing: &ThreadScopedOutgoingMessageSender, thread_state: &Arc>, ) { @@ -2111,22 +2113,40 @@ async fn handle_turn_complete( None => (TurnStatus::Completed, None), }; - emit_turn_completed_with_status(conversation_id, event_turn_id, status, error, outgoing).await; + emit_turn_completed_with_status( + conversation_id, + event_turn_id, + TurnCompletionMetadata { + status, + error, + started_at: turn_summary.started_at, + completed_at: turn_complete_event.completed_at, + duration_ms: turn_complete_event.duration_ms, + }, + outgoing, + ) + .await; } async fn handle_turn_interrupted( conversation_id: ThreadId, event_turn_id: String, + turn_aborted_event: TurnAbortedEvent, outgoing: &ThreadScopedOutgoingMessageSender, thread_state: &Arc>, ) { - find_and_remove_turn_summary(conversation_id, thread_state).await; + let turn_summary = find_and_remove_turn_summary(conversation_id, thread_state).await; emit_turn_completed_with_status( conversation_id, event_turn_id, - 
TurnStatus::Interrupted, - /*error*/ None, + TurnCompletionMetadata { + status: TurnStatus::Interrupted, + error: None, + started_at: turn_summary.started_at, + completed_at: turn_aborted_event.completed_at, + duration_ms: turn_aborted_event.duration_ms, + }, outgoing, ) .await; @@ -2592,8 +2612,11 @@ async fn on_file_change_request_approval_response( complete_file_change_item( conversation_id, item_id.clone(), - changes, - status, + ThreadItem::FileChange { + id: item_id.clone(), + changes, + status, + }, event_turn_id.clone(), &outgoing, &thread_state, @@ -2710,7 +2733,7 @@ async fn on_command_execution_request_approval_response( && let Some(completion_item) = completion_item { complete_command_execution_item( - conversation_id, + &conversation_id, event_turn_id.clone(), item_id.clone(), completion_item.command, @@ -2720,6 +2743,7 @@ async fn on_command_execution_request_approval_response( completion_item.command_actions, status, &outgoing, + &thread_state, ) .await; } @@ -2819,6 +2843,7 @@ async fn construct_mcp_tool_call_end_notification( Some(McpToolCallResult { content: value.content.clone(), structured_content: value.structured_content.clone(), + meta: value.meta.clone(), }), None, ), @@ -2861,6 +2886,7 @@ mod tests { use codex_app_server_protocol::GuardianApprovalReviewStatus; use codex_app_server_protocol::JSONRPCErrorError; use codex_app_server_protocol::TurnPlanStepStatus; + use codex_login::CodexAuth; use codex_protocol::items::HookPromptFragment; use codex_protocol::items::build_hook_prompt_message; use codex_protocol::mcp::CallToolResult; @@ -2871,16 +2897,22 @@ mod tests { use codex_protocol::protocol::CollabResumeBeginEvent; use codex_protocol::protocol::CollabResumeEndEvent; use codex_protocol::protocol::CreditsSnapshot; + use codex_protocol::protocol::GuardianAssessmentEvent; + use codex_protocol::protocol::GuardianAssessmentStatus; use codex_protocol::protocol::McpInvocation; use codex_protocol::protocol::RateLimitSnapshot; use 
codex_protocol::protocol::RateLimitWindow; use codex_protocol::protocol::TokenUsage; use codex_protocol::protocol::TokenUsageInfo; use codex_utils_absolute_path::AbsolutePathBuf; + use core_test_support::load_default_config_for_test; use pretty_assertions::assert_eq; use rmcp::model::Content; use serde_json::Value as JsonValue; + use serde_json::json; + use std::path::PathBuf; use std::time::Duration; + use tempfile::TempDir; use tokio::sync::Mutex; use tokio::sync::mpsc; @@ -2888,6 +2920,9 @@ mod tests { Arc::new(Mutex::new(ThreadState::default())) } + const TEST_TURN_COMPLETED_AT: i64 = 1_716_000_456; + const TEST_TURN_DURATION_MS: i64 = 1_234; + async fn recv_broadcast_message( rx: &mut mpsc::Receiver, ) -> Result { @@ -2901,6 +2936,102 @@ mod tests { } } + fn turn_complete_event(turn_id: &str) -> TurnCompleteEvent { + TurnCompleteEvent { + turn_id: turn_id.to_string(), + last_agent_message: None, + completed_at: Some(TEST_TURN_COMPLETED_AT), + duration_ms: Some(TEST_TURN_DURATION_MS), + } + } + + fn turn_aborted_event(turn_id: &str) -> TurnAbortedEvent { + TurnAbortedEvent { + turn_id: Some(turn_id.to_string()), + reason: codex_protocol::protocol::TurnAbortReason::Interrupted, + completed_at: Some(TEST_TURN_COMPLETED_AT), + duration_ms: Some(TEST_TURN_DURATION_MS), + } + } + + fn command_execution_completion_item(command: &str) -> CommandExecutionCompletionItem { + CommandExecutionCompletionItem { + command: command.to_string(), + cwd: PathBuf::from("/tmp"), + command_actions: vec![V2ParsedCommand::Unknown { + command: command.to_string(), + }], + } + } + + fn guardian_command_assessment( + id: &str, + turn_id: &str, + status: GuardianAssessmentStatus, + ) -> GuardianAssessmentEvent { + let (risk_score, risk_level, rationale) = match status { + GuardianAssessmentStatus::InProgress => (None, None, None), + GuardianAssessmentStatus::Approved => ( + Some(12), + Some(codex_protocol::protocol::GuardianRiskLevel::Low), + Some("looks safe".to_string()), + ), + 
GuardianAssessmentStatus::Denied => ( + Some(88), + Some(codex_protocol::protocol::GuardianRiskLevel::High), + Some("too risky".to_string()), + ), + GuardianAssessmentStatus::Aborted => (None, None, None), + }; + GuardianAssessmentEvent { + id: id.to_string(), + turn_id: turn_id.to_string(), + status, + risk_score, + risk_level, + rationale, + action: serde_json::from_value(json!({ + "type": "command", + "source": "shell", + "command": format!("rm -f /tmp/{id}.sqlite"), + "cwd": "/tmp", + })) + .expect("guardian action"), + } + } + + struct GuardianAssessmentTestContext { + conversation_id: ThreadId, + conversation: Arc, + thread_manager: Arc, + outgoing: ThreadScopedOutgoingMessageSender, + thread_state: Arc>, + thread_watch_manager: ThreadWatchManager, + codex_home: PathBuf, + } + + impl GuardianAssessmentTestContext { + async fn apply_guardian_assessment_event(&self, assessment: GuardianAssessmentEvent) { + let event_turn_id = assessment.turn_id.clone(); + apply_bespoke_event_handling( + Event { + id: event_turn_id, + msg: EventMsg::GuardianAssessment(assessment), + }, + self.conversation_id, + self.conversation.clone(), + self.thread_manager.clone(), + self.outgoing.clone(), + self.thread_state.clone(), + self.thread_watch_manager.clone(), + ApiVersion::V2, + "test-provider".to_string(), + &self.codex_home, + ) + .await; + } + } + #[test] fn guardian_assessment_started_uses_event_turn_id_fallback() { let conversation_id = ThreadId::new(); @@ -3019,6 +3150,313 @@ mod tests { } } + #[tokio::test] + async fn command_execution_started_helper_emits_once() -> Result<()> { + let conversation_id = ThreadId::new(); + let thread_state = new_thread_state(); + let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY); + let outgoing = Arc::new(OutgoingMessageSender::new(tx)); + let outgoing = ThreadScopedOutgoingMessageSender::new( + outgoing, + vec![ConnectionId(1)], + ThreadId::new(), + ); + let completion_item = command_execution_completion_item("printf hi"); + + let 
first_start = start_command_execution_item( + &conversation_id, + "turn-1".to_string(), + "cmd-1".to_string(), + completion_item.command.clone(), + completion_item.cwd.clone(), + completion_item.command_actions.clone(), + CommandExecutionSource::Agent, + &outgoing, + &thread_state, + ) + .await; + assert!(first_start); + + let msg = recv_broadcast_message(&mut rx).await?; + match msg { + OutgoingMessage::AppServerNotification(ServerNotification::ItemStarted(payload)) => { + assert_eq!(payload.thread_id, conversation_id.to_string()); + assert_eq!(payload.turn_id, "turn-1"); + assert_eq!( + payload.item, + ThreadItem::CommandExecution { + id: "cmd-1".to_string(), + command: completion_item.command.clone(), + cwd: completion_item.cwd.clone(), + process_id: None, + source: CommandExecutionSource::Agent, + status: CommandExecutionStatus::InProgress, + command_actions: completion_item.command_actions.clone(), + aggregated_output: None, + exit_code: None, + duration_ms: None, + } + ); + } + other => bail!("unexpected message: {other:?}"), + } + + let second_start = start_command_execution_item( + &conversation_id, + "turn-1".to_string(), + "cmd-1".to_string(), + completion_item.command.clone(), + completion_item.cwd.clone(), + completion_item.command_actions.clone(), + CommandExecutionSource::Agent, + &outgoing, + &thread_state, + ) + .await; + assert!(!second_start); + assert!(rx.try_recv().is_err(), "duplicate start should not emit"); + Ok(()) + } + + #[tokio::test] + async fn complete_command_execution_item_emits_declined_once_for_pending_command() -> Result<()> + { + let conversation_id = ThreadId::new(); + let thread_state = new_thread_state(); + let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY); + let outgoing = Arc::new(OutgoingMessageSender::new(tx)); + let outgoing = ThreadScopedOutgoingMessageSender::new( + outgoing, + vec![ConnectionId(1)], + ThreadId::new(), + ); + let completion_item = command_execution_completion_item("printf hi"); + + 
start_command_execution_item( + &conversation_id, + "turn-1".to_string(), + "cmd-1".to_string(), + completion_item.command.clone(), + completion_item.cwd.clone(), + completion_item.command_actions.clone(), + CommandExecutionSource::Agent, + &outgoing, + &thread_state, + ) + .await; + let _started = recv_broadcast_message(&mut rx).await?; + + complete_command_execution_item( + &conversation_id, + "turn-1".to_string(), + "cmd-1".to_string(), + completion_item.command.clone(), + completion_item.cwd.clone(), + /*process_id*/ None, + CommandExecutionSource::Agent, + completion_item.command_actions.clone(), + CommandExecutionStatus::Declined, + &outgoing, + &thread_state, + ) + .await; + + let completed = recv_broadcast_message(&mut rx).await?; + match completed { + OutgoingMessage::AppServerNotification(ServerNotification::ItemCompleted(payload)) => { + let ThreadItem::CommandExecution { id, status, .. } = payload.item else { + bail!("expected command execution completion"); + }; + assert_eq!(id, "cmd-1"); + assert_eq!(status, CommandExecutionStatus::Declined); + } + other => bail!("unexpected message: {other:?}"), + } + + complete_command_execution_item( + &conversation_id, + "turn-1".to_string(), + "cmd-1".to_string(), + completion_item.command, + completion_item.cwd, + /*process_id*/ None, + CommandExecutionSource::Agent, + completion_item.command_actions, + CommandExecutionStatus::Declined, + &outgoing, + &thread_state, + ) + .await; + assert!( + rx.try_recv().is_err(), + "completion should not emit after the pending item is cleared" + ); + Ok(()) + } + + #[tokio::test] + async fn guardian_command_execution_notifications_wrap_review_lifecycle() -> Result<()> { + let codex_home = TempDir::new()?; + let config = load_default_config_for_test(&codex_home).await; + let thread_manager = Arc::new( + codex_core::test_support::thread_manager_with_models_provider_and_home( + CodexAuth::create_dummy_chatgpt_auth_for_testing(), + config.model_provider.clone(), + 
config.codex_home.clone(), + Arc::new(codex_exec_server::EnvironmentManager::new( + /*exec_server_url*/ None, + )), + ), + ); + let codex_core::NewThread { + thread_id: conversation_id, + thread: conversation, + .. + } = thread_manager.start_thread(config).await?; + let thread_state = new_thread_state(); + let thread_watch_manager = ThreadWatchManager::new(); + let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY); + let outgoing = Arc::new(OutgoingMessageSender::new(tx)); + let outgoing = ThreadScopedOutgoingMessageSender::new( + outgoing, + vec![ConnectionId(1)], + conversation_id, + ); + let guardian_context = GuardianAssessmentTestContext { + conversation_id, + conversation: conversation.clone(), + thread_manager: thread_manager.clone(), + outgoing: outgoing.clone(), + thread_state: thread_state.clone(), + thread_watch_manager: thread_watch_manager.clone(), + codex_home: codex_home.path().to_path_buf(), + }; + + guardian_context + .apply_guardian_assessment_event(guardian_command_assessment( + "cmd-guardian-approved", + "turn-guardian-approved", + GuardianAssessmentStatus::InProgress, + )) + .await; + let first = recv_broadcast_message(&mut rx).await?; + match first { + OutgoingMessage::AppServerNotification(ServerNotification::ItemStarted(payload)) => { + assert_eq!(payload.turn_id, "turn-guardian-approved"); + let ThreadItem::CommandExecution { id, status, .. 
} = payload.item else { + bail!("expected command execution item"); + }; + assert_eq!(id, "cmd-guardian-approved"); + assert_eq!(status, CommandExecutionStatus::InProgress); + } + other => bail!("unexpected message: {other:?}"), + } + let second = recv_broadcast_message(&mut rx).await?; + match second { + OutgoingMessage::AppServerNotification( + ServerNotification::ItemGuardianApprovalReviewStarted(payload), + ) => { + assert_eq!(payload.target_item_id, "cmd-guardian-approved"); + assert_eq!( + payload.review.status, + GuardianApprovalReviewStatus::InProgress + ); + } + other => bail!("unexpected message: {other:?}"), + } + + guardian_context + .apply_guardian_assessment_event(guardian_command_assessment( + "cmd-guardian-approved", + "turn-guardian-approved", + GuardianAssessmentStatus::Approved, + )) + .await; + let third = recv_broadcast_message(&mut rx).await?; + match third { + OutgoingMessage::AppServerNotification( + ServerNotification::ItemGuardianApprovalReviewCompleted(payload), + ) => { + assert_eq!(payload.target_item_id, "cmd-guardian-approved"); + assert_eq!( + payload.review.status, + GuardianApprovalReviewStatus::Approved + ); + } + other => bail!("unexpected message: {other:?}"), + } + assert!( + rx.try_recv().is_err(), + "approved review should not complete the command item" + ); + + guardian_context + .apply_guardian_assessment_event(guardian_command_assessment( + "cmd-guardian-denied", + "turn-guardian-denied", + GuardianAssessmentStatus::InProgress, + )) + .await; + let fourth = recv_broadcast_message(&mut rx).await?; + match fourth { + OutgoingMessage::AppServerNotification(ServerNotification::ItemStarted(payload)) => { + assert_eq!(payload.turn_id, "turn-guardian-denied"); + let ThreadItem::CommandExecution { id, status, .. 
} = payload.item else { + bail!("expected command execution item"); + }; + assert_eq!(id, "cmd-guardian-denied"); + assert_eq!(status, CommandExecutionStatus::InProgress); + } + other => bail!("unexpected message: {other:?}"), + } + let fifth = recv_broadcast_message(&mut rx).await?; + match fifth { + OutgoingMessage::AppServerNotification( + ServerNotification::ItemGuardianApprovalReviewStarted(payload), + ) => { + assert_eq!(payload.target_item_id, "cmd-guardian-denied"); + assert_eq!( + payload.review.status, + GuardianApprovalReviewStatus::InProgress + ); + } + other => bail!("unexpected message: {other:?}"), + } + + guardian_context + .apply_guardian_assessment_event(guardian_command_assessment( + "cmd-guardian-denied", + "turn-guardian-denied", + GuardianAssessmentStatus::Denied, + )) + .await; + let sixth = recv_broadcast_message(&mut rx).await?; + match sixth { + OutgoingMessage::AppServerNotification( + ServerNotification::ItemGuardianApprovalReviewCompleted(payload), + ) => { + assert_eq!(payload.target_item_id, "cmd-guardian-denied"); + assert_eq!(payload.review.status, GuardianApprovalReviewStatus::Denied); + } + other => bail!("unexpected message: {other:?}"), + } + let seventh = recv_broadcast_message(&mut rx).await?; + match seventh { + OutgoingMessage::AppServerNotification(ServerNotification::ItemCompleted(payload)) => { + let ThreadItem::CommandExecution { id, status, .. 
} = payload.item else { + bail!("expected command execution completion"); + }; + assert_eq!(id, "cmd-guardian-denied"); + assert_eq!(status, CommandExecutionStatus::Declined); + } + other => bail!("unexpected message: {other:?}"), + } + + assert!(rx.try_recv().is_err(), "no extra messages expected"); + conversation.shutdown_and_wait().await?; + Ok(()) + } + #[test] fn file_change_accept_for_session_maps_to_approved_for_session() { let (decision, completion_status) = @@ -3280,10 +3718,25 @@ mod tests { ThreadId::new(), ); let thread_state = new_thread_state(); + { + let mut state = thread_state.lock().await; + state.track_current_turn_event(&EventMsg::TurnStarted( + codex_protocol::protocol::TurnStartedEvent { + turn_id: event_turn_id.clone(), + started_at: Some(42), + model_context_window: None, + collaboration_mode_kind: Default::default(), + }, + )); + state.track_current_turn_event(&EventMsg::TurnComplete(turn_complete_event( + &event_turn_id, + ))); + } handle_turn_complete( conversation_id, event_turn_id.clone(), + turn_complete_event(&event_turn_id), &outgoing, &thread_state, ) @@ -3295,6 +3748,9 @@ mod tests { assert_eq!(n.turn.id, event_turn_id); assert_eq!(n.turn.status, TurnStatus::Completed); assert_eq!(n.turn.error, None); + assert_eq!(n.turn.started_at, Some(42)); + assert_eq!(n.turn.completed_at, Some(TEST_TURN_COMPLETED_AT)); + assert_eq!(n.turn.duration_ms, Some(TEST_TURN_DURATION_MS)); } other => bail!("unexpected message: {other:?}"), } @@ -3328,6 +3784,7 @@ mod tests { handle_turn_interrupted( conversation_id, event_turn_id.clone(), + turn_aborted_event(&event_turn_id), &outgoing, &thread_state, ) @@ -3339,6 +3796,8 @@ mod tests { assert_eq!(n.turn.id, event_turn_id); assert_eq!(n.turn.status, TurnStatus::Interrupted); assert_eq!(n.turn.error, None); + assert_eq!(n.turn.completed_at, Some(TEST_TURN_COMPLETED_AT)); + assert_eq!(n.turn.duration_ms, Some(TEST_TURN_DURATION_MS)); } other => bail!("unexpected message: {other:?}"), } @@ -3372,6 +3831,7 
@@ mod tests { handle_turn_complete( conversation_id, event_turn_id.clone(), + turn_complete_event(&event_turn_id), &outgoing, &thread_state, ) @@ -3390,6 +3850,8 @@ mod tests { additional_details: None, }) ); + assert_eq!(n.turn.completed_at, Some(TEST_TURN_COMPLETED_AT)); + assert_eq!(n.turn.duration_ms, Some(TEST_TURN_DURATION_MS)); } other => bail!("unexpected message: {other:?}"), } @@ -3632,7 +4094,14 @@ mod tests { &thread_state, ) .await; - handle_turn_complete(conversation_a, a_turn1.clone(), &outgoing, &thread_state).await; + handle_turn_complete( + conversation_a, + a_turn1.clone(), + turn_complete_event(&a_turn1), + &outgoing, + &thread_state, + ) + .await; // Turn 1 on conversation B let b_turn1 = "b_turn1".to_string(); @@ -3646,11 +4115,25 @@ mod tests { &thread_state, ) .await; - handle_turn_complete(conversation_b, b_turn1.clone(), &outgoing, &thread_state).await; + handle_turn_complete( + conversation_b, + b_turn1.clone(), + turn_complete_event(&b_turn1), + &outgoing, + &thread_state, + ) + .await; // Turn 2 on conversation A let a_turn2 = "a_turn2".to_string(); - handle_turn_complete(conversation_a, a_turn2.clone(), &outgoing, &thread_state).await; + handle_turn_complete( + conversation_a, + a_turn2.clone(), + turn_complete_event(&a_turn2), + &outgoing, + &thread_state, + ) + .await; // Verify: A turn 1 let msg = recv_broadcast_message(&mut rx).await?; @@ -3751,7 +4234,9 @@ mod tests { content: content.clone(), is_error: Some(false), structured_content: None, - meta: None, + meta: Some(serde_json::json!({ + "ui/resourceUri": "ui://widget/list-resources.html" + })), }; let end_event = McpToolCallEndEvent { @@ -3786,6 +4271,9 @@ mod tests { result: Some(McpToolCallResult { content, structured_content: None, + meta: Some(serde_json::json!({ + "ui/resourceUri": "ui://widget/list-resources.html" + })), }), error: None, duration_ms: Some(0), diff --git a/codex-rs/app-server/src/codex_message_processor.rs 
b/codex-rs/app-server/src/codex_message_processor.rs index 86c2218bc8..9282720828 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -76,11 +76,14 @@ use codex_app_server_protocol::LoginAccountResponse; use codex_app_server_protocol::LoginApiKeyParams; use codex_app_server_protocol::LogoutAccountResponse; use codex_app_server_protocol::MarketplaceInterface; +use codex_app_server_protocol::McpResourceReadParams; +use codex_app_server_protocol::McpResourceReadResponse; use codex_app_server_protocol::McpServerOauthLoginCompletedNotification; use codex_app_server_protocol::McpServerOauthLoginParams; use codex_app_server_protocol::McpServerOauthLoginResponse; use codex_app_server_protocol::McpServerRefreshResponse; use codex_app_server_protocol::McpServerStatus; +use codex_app_server_protocol::McpServerStatusDetail; use codex_app_server_protocol::MockExperimentalMethodParams; use codex_app_server_protocol::MockExperimentalMethodResponse; use codex_app_server_protocol::ModelListParams; @@ -245,11 +248,12 @@ use codex_login::default_client::set_default_client_residency_requirement; use codex_login::login_with_api_key; use codex_login::request_device_code; use codex_login::run_login_server; -use codex_mcp::mcp::auth::discover_supported_scopes; -use codex_mcp::mcp::auth::resolve_oauth_scopes; -use codex_mcp::mcp::collect_mcp_snapshot; -use codex_mcp::mcp::effective_mcp_servers; -use codex_mcp::mcp::qualified_mcp_tool_name_prefix; +use codex_mcp::McpSnapshotDetail; +use codex_mcp::collect_mcp_snapshot_with_detail; +use codex_mcp::discover_supported_scopes; +use codex_mcp::effective_mcp_servers; +use codex_mcp::qualified_mcp_tool_name_prefix; +use codex_mcp::resolve_oauth_scopes; use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig; use codex_protocol::ThreadId; use codex_protocol::config_types::CollaborationMode; @@ -474,22 +478,13 @@ pub(crate) struct CodexMessageProcessorArgs 
{ } impl CodexMessageProcessor { - pub(crate) fn clear_plugin_related_caches(&self) { - self.thread_manager.plugins_manager().clear_cache(); - self.thread_manager.skills_manager().clear_cache(); + pub(crate) fn handle_config_mutation(&self) { + self.clear_plugin_related_caches(); } - pub(crate) async fn maybe_start_plugin_startup_tasks_for_latest_config(&self) { - match self.load_latest_config(/*fallback_cwd*/ None).await { - Ok(config) => self - .thread_manager - .plugins_manager() - .maybe_start_plugin_startup_tasks_for_config( - &config, - self.thread_manager.auth_manager(), - ), - Err(err) => warn!("failed to load latest config for plugin startup tasks: {err:?}"), - } + fn clear_plugin_related_caches(&self) { + self.thread_manager.plugins_manager().clear_cache(); + self.thread_manager.skills_manager().clear_cache(); } fn current_account_updated_notification(&self) -> AccountUpdatedNotification { @@ -889,6 +884,10 @@ impl CodexMessageProcessor { self.list_mcp_server_status(to_connection_request_id(request_id), params) .await; } + ClientRequest::McpResourceRead { request_id, params } => { + self.read_mcp_resource(to_connection_request_id(request_id), params) + .await; + } ClientRequest::WindowsSandboxSetupStart { request_id, params } => { self.windows_sandbox_setup_start(to_connection_request_id(request_id), params) .await; @@ -2474,8 +2473,8 @@ impl CodexMessageProcessor { approval_policy: Option, approvals_reviewer: Option, sandbox: Option, - base_instructions: Option, - developer_instructions: Option, + base_instructions: Option>, + developer_instructions: Option>, personality: Option, ) -> ConfigOverrides { ConfigOverrides { @@ -2754,24 +2753,6 @@ impl CodexMessageProcessor { if state_db_ctx.is_none() { state_db_ctx = get_state_db(&self.config).await; } - if state_db_ctx.is_none() { - match StateRuntime::init( - self.config.sqlite_home.clone(), - self.config.model_provider_id.clone(), - ) - .await - { - Ok(ctx) => { - state_db_ctx = Some(ctx); - } - Err(err) 
=> { - warn!( - "failed to initialize state db for thread metadata update at {}: {err}", - self.config.sqlite_home.display() - ); - } - } - } let Some(state_db_ctx) = state_db_ctx else { self.send_internal_error( request_id, @@ -4382,6 +4363,13 @@ impl CodexMessageProcessor { developer_instructions, /*personality*/ None, ); + if typesafe_overrides.base_instructions.is_none() + && let Ok(history) = RolloutRecorder::get_rollout_history(&rollout_path).await + && let Some(base_instructions) = history.get_base_instructions() + { + typesafe_overrides.base_instructions = + Some(base_instructions.map(|base_instructions| base_instructions.text)); + } typesafe_overrides.ephemeral = ephemeral.then_some(true); // Derive a Config using the same logic as new conversation, honoring overrides if provided. let cloud_requirements = self.current_cloud_requirements(); @@ -5187,13 +5175,19 @@ impl CodexMessageProcessor { request_id: ConnectionRequestId, params: ListMcpServerStatusParams, config: Config, - mcp_config: codex_mcp::mcp::McpConfig, + mcp_config: codex_mcp::McpConfig, auth: Option, ) { - let snapshot = collect_mcp_snapshot( + let detail = match params.detail.unwrap_or(McpServerStatusDetail::Full) { + McpServerStatusDetail::Full => McpSnapshotDetail::Full, + McpServerStatusDetail::ToolsAndAuthOnly => McpSnapshotDetail::ToolsAndAuthOnly, + }; + + let snapshot = collect_mcp_snapshot_with_detail( &mcp_config, auth.as_ref(), request_id.request_id.to_string(), + detail, ) .await; @@ -5316,6 +5310,58 @@ impl CodexMessageProcessor { outgoing.send_response(request_id, response).await; } + async fn read_mcp_resource( + &self, + request_id: ConnectionRequestId, + params: McpResourceReadParams, + ) { + let outgoing = Arc::clone(&self.outgoing); + let (_, thread) = match self.load_thread(¶ms.thread_id).await { + Ok(thread) => thread, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + tokio::spawn(async move { + let result = 
thread.read_mcp_resource(¶ms.server, ¶ms.uri).await; + match result { + Ok(result) => match serde_json::from_value::(result) { + Ok(response) => { + outgoing.send_response(request_id, response).await; + } + Err(error) => { + outgoing + .send_error( + request_id, + JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!( + "failed to deserialize MCP resource read response: {error}" + ), + data: None, + }, + ) + .await; + } + }, + Err(error) => { + outgoing + .send_error( + request_id, + JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("{error:#}"), + data: None, + }, + ) + .await; + } + } + }); + } + async fn send_invalid_request_error(&self, request_id: ConnectionRequestId, message: String) { let error = JSONRPCErrorError { code: INVALID_REQUEST_ERROR_CODE, @@ -5651,7 +5697,11 @@ impl CodexMessageProcessor { .set_enabled(Feature::Apps, thread.enabled(Feature::Apps)); } - if !config.features.apps_enabled(Some(&self.auth_manager)).await { + let auth = self.auth_manager.auth().await; + if !config + .features + .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::is_chatgpt_auth)) + { self.outgoing .send_response( request_id, @@ -5982,6 +6032,7 @@ impl CodexMessageProcessor { force_remote_sync, } = params; let roots = cwds.unwrap_or_default(); + plugins_manager.maybe_start_non_curated_plugin_cache_refresh_for_roots(&roots); let mut config = match self.load_latest_config(/*fallback_cwd*/ None).await { Ok(config) => config, @@ -6318,9 +6369,11 @@ impl CodexMessageProcessor { } let plugin_apps = load_plugin_apps(result.installed_path.as_path()); + let auth = self.auth_manager.auth().await; let apps_needing_auth = if plugin_apps.is_empty() - || !config.features.apps_enabled(Some(&self.auth_manager)).await - { + || !config.features.apps_enabled_for_auth( + auth.as_ref().is_some_and(CodexAuth::is_chatgpt_auth), + ) { Vec::new() } else { let (all_connectors_result, accessible_connectors_result) = tokio::join!( @@ -6611,6 +6664,9 @@ impl 
CodexMessageProcessor { items: vec![], error: None, status: TurnStatus::InProgress, + started_at: None, + completed_at: None, + duration_ms: None, }; let response = TurnStartResponse { turn }; @@ -6947,6 +7003,9 @@ impl CodexMessageProcessor { items, error: None, status: TurnStatus::InProgress, + started_at: None, + completed_at: None, + duration_ms: None, } } @@ -7613,42 +7672,101 @@ impl CodexMessageProcessor { } let snapshot = self.feedback.snapshot(conversation_id); let thread_id = snapshot.thread_id.clone(); - let sqlite_feedback_logs = if include_logs { + let (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx) = if include_logs { if let Some(log_db) = self.log_db.as_ref() { log_db.flush().await; } let state_db_ctx = get_state_db(&self.config).await; - match (state_db_ctx.as_ref(), conversation_id) { - (Some(state_db_ctx), Some(conversation_id)) => { - let thread_id_text = conversation_id.to_string(); - match state_db_ctx.query_feedback_logs(&thread_id_text).await { - Ok(logs) if logs.is_empty() => None, - Ok(logs) => Some(logs), - Err(err) => { - warn!( - "failed to query feedback logs from sqlite for thread_id={thread_id_text}: {err}" - ); - None + let feedback_thread_ids = match conversation_id { + Some(conversation_id) => match self + .thread_manager + .list_agent_subtree_thread_ids(conversation_id) + .await + { + Ok(thread_ids) => thread_ids, + Err(err) => { + warn!( + "failed to list feedback subtree for thread_id={conversation_id}: {err}" + ); + let mut thread_ids = vec![conversation_id]; + if let Some(state_db_ctx) = state_db_ctx.as_ref() { + for status in [ + codex_state::DirectionalThreadSpawnEdgeStatus::Open, + codex_state::DirectionalThreadSpawnEdgeStatus::Closed, + ] { + match state_db_ctx + .list_thread_spawn_descendants_with_status( + conversation_id, + status, + ) + .await + { + Ok(descendant_ids) => thread_ids.extend(descendant_ids), + Err(err) => warn!( + "failed to list persisted feedback subtree for thread_id={conversation_id}: {err}" 
+ ), + } + } } + thread_ids + } + }, + None => Vec::new(), + }; + let sqlite_feedback_logs = if let Some(state_db_ctx) = state_db_ctx.as_ref() + && !feedback_thread_ids.is_empty() + { + let thread_id_texts = feedback_thread_ids + .iter() + .map(ToString::to_string) + .collect::>(); + let thread_id_refs = thread_id_texts + .iter() + .map(String::as_str) + .collect::>(); + match state_db_ctx + .query_feedback_logs_for_threads(&thread_id_refs) + .await + { + Ok(logs) if logs.is_empty() => None, + Ok(logs) => Some(logs), + Err(err) => { + let thread_ids = thread_id_texts.join(", "); + warn!( + "failed to query feedback logs from sqlite for thread_ids=[{thread_ids}]: {err}" + ); + None } } - _ => None, - } + } else { + None + }; + (feedback_thread_ids, sqlite_feedback_logs, state_db_ctx) } else { - None + (Vec::new(), None, None) }; - let validated_rollout_path = if include_logs { - match conversation_id { - Some(conv_id) => self.resolve_rollout_path(conv_id).await, - None => None, + let mut attachment_paths = Vec::new(); + let mut seen_attachment_paths = HashSet::new(); + if include_logs { + for feedback_thread_id in &feedback_thread_ids { + let Some(rollout_path) = self + .resolve_rollout_path(*feedback_thread_id, state_db_ctx.as_ref()) + .await + else { + continue; + }; + if seen_attachment_paths.insert(rollout_path.clone()) { + attachment_paths.push(rollout_path); + } } - } else { - None - }; - let mut attachment_paths = validated_rollout_path.into_iter().collect::>(); + } if let Some(extra_log_files) = extra_log_files { - attachment_paths.extend(extra_log_files); + for extra_log_file in extra_log_files { + if seen_attachment_paths.insert(extra_log_file.clone()) { + attachment_paths.push(extra_log_file); + } + } } let session_source = self.thread_manager.session_source(); @@ -7767,11 +7885,25 @@ impl CodexMessageProcessor { }); } - async fn resolve_rollout_path(&self, conversation_id: ThreadId) -> Option { - match 
self.thread_manager.get_thread(conversation_id).await { - Ok(conv) => conv.rollout_path(), - Err(_) => None, + async fn resolve_rollout_path( + &self, + conversation_id: ThreadId, + state_db_ctx: Option<&StateDbHandle>, + ) -> Option { + if let Ok(conversation) = self.thread_manager.get_thread(conversation_id).await + && let Some(rollout_path) = conversation.rollout_path() + { + return Some(rollout_path); } + + let state_db_ctx = state_db_ctx?; + state_db_ctx + .find_rollout_path_by_id(conversation_id, /*archived_only*/ None) + .await + .unwrap_or_else(|err| { + warn!("failed to resolve rollout path for thread_id={conversation_id}: {err}"); + None + }) } } @@ -9596,6 +9728,7 @@ mod tests { state.track_current_turn_event(&EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }, diff --git a/codex-rs/app-server/src/codex_message_processor/plugin_mcp_oauth.rs b/codex-rs/app-server/src/codex_message_processor/plugin_mcp_oauth.rs index 58f4a5e8dd..b027aef453 100644 --- a/codex-rs/app-server/src/codex_message_processor/plugin_mcp_oauth.rs +++ b/codex-rs/app-server/src/codex_message_processor/plugin_mcp_oauth.rs @@ -5,10 +5,10 @@ use codex_app_server_protocol::McpServerOauthLoginCompletedNotification; use codex_app_server_protocol::ServerNotification; use codex_config::types::McpServerConfig; use codex_core::config::Config; -use codex_mcp::mcp::auth::McpOAuthLoginSupport; -use codex_mcp::mcp::auth::oauth_login_support; -use codex_mcp::mcp::auth::resolve_oauth_scopes; -use codex_mcp::mcp::auth::should_retry_without_scopes; +use codex_mcp::McpOAuthLoginSupport; +use codex_mcp::oauth_login_support; +use codex_mcp::resolve_oauth_scopes; +use codex_mcp::should_retry_without_scopes; use codex_rmcp_client::perform_oauth_login_silent; use tracing::warn; diff --git a/codex-rs/app-server/src/config_api.rs b/codex-rs/app-server/src/config_api.rs 
index ec2cf82cc1..4f9a800243 100644 --- a/codex-rs/app-server/src/config_api.rs +++ b/codex-rs/app-server/src/config_api.rs @@ -449,6 +449,7 @@ fn map_network_requirements_to_api( .collect() }), managed_allowed_domains_only: network.managed_allowed_domains_only, + danger_full_access_denylist_only: network.danger_full_access_denylist_only, allowed_domains, denied_domains, unix_sockets: network.unix_sockets.map(|unix_sockets| { @@ -594,6 +595,7 @@ mod tests { ]), }), managed_allowed_domains_only: Some(false), + danger_full_access_denylist_only: Some(true), unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml { entries: std::collections::BTreeMap::from([( "/tmp/proxy.sock".to_string(), @@ -653,6 +655,7 @@ mod tests { ("example.com".to_string(), NetworkDomainPermission::Deny), ])), managed_allowed_domains_only: Some(false), + danger_full_access_denylist_only: Some(true), allowed_domains: Some(vec!["api.openai.com".to_string()]), denied_domains: Some(vec!["example.com".to_string()]), unix_sockets: Some(std::collections::BTreeMap::from([( @@ -687,6 +690,7 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, + danger_full_access_denylist_only: None, unix_sockets: Some(CoreNetworkUnixSocketPermissionsToml { entries: std::collections::BTreeMap::from([( "/tmp/ignored.sock".to_string(), @@ -710,6 +714,7 @@ mod tests { dangerously_allow_all_unix_sockets: None, domains: None, managed_allowed_domains_only: None, + danger_full_access_denylist_only: None, allowed_domains: None, denied_domains: None, unix_sockets: Some(std::collections::BTreeMap::from([( diff --git a/codex-rs/app-server/src/fs_watch.rs b/codex-rs/app-server/src/fs_watch.rs index 309bee4a64..3a5b226248 100644 --- a/codex-rs/app-server/src/fs_watch.rs +++ b/codex-rs/app-server/src/fs_watch.rs @@ -1,3 +1,4 @@ +use crate::fs_api::invalid_request; use crate::outgoing_message::ConnectionId; use crate::outgoing_message::OutgoingMessageSender; use 
codex_app_server_protocol::FsChangedNotification; @@ -16,6 +17,7 @@ use codex_core::file_watcher::WatchRegistration; use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashMap; use std::collections::HashSet; +use std::collections::hash_map::Entry; use std::hash::Hash; use std::path::PathBuf; use std::sync::Arc; @@ -26,7 +28,6 @@ use tokio::sync::mpsc; use tokio::sync::oneshot; use tokio::time::Instant; use tracing::warn; -use uuid::Uuid; const FS_CHANGED_NOTIFICATION_DEBOUNCE: Duration = Duration::from_millis(200); @@ -120,7 +121,11 @@ impl FsWatchManager { connection_id: ConnectionId, params: FsWatchParams, ) -> Result { - let watch_id = Uuid::now_v7().to_string(); + let watch_id = params.watch_id; + let watch_key = WatchKey { + connection_id, + watch_id: watch_id.clone(), + }; let outgoing = self.outgoing.clone(); let (subscriber, rx) = self.file_watcher.add_subscriber(); let watch_root = params.path.to_path_buf().clone(); @@ -130,17 +135,20 @@ impl FsWatchManager { }]); let (terminate_tx, terminate_rx) = oneshot::channel(); - self.state.lock().await.entries.insert( - WatchKey { - connection_id, - watch_id: watch_id.clone(), - }, - WatchEntry { - terminate_tx, - _subscriber: subscriber, - _registration: registration, - }, - ); + match self.state.lock().await.entries.entry(watch_key) { + Entry::Occupied(_) => { + return Err(invalid_request(format!( + "watchId already exists: {watch_id}" + ))); + } + Entry::Vacant(entry) => { + entry.insert(WatchEntry { + terminate_tx, + _subscriber: subscriber, + _registration: registration, + }); + } + } let task_watch_id = watch_id.clone(); tokio::spawn(async move { @@ -158,19 +166,7 @@ impl FsWatchManager { let mut changed_paths = event .paths .into_iter() - .filter_map(|path| { - match AbsolutePathBuf::resolve_path_against_base(&path, &watch_root) { - Ok(path) => Some(path), - Err(err) => { - warn!( - "failed to normalize watch event path ({}) for {}: {err}", - path.display(), - watch_root.display() - ); - 
None - } - } - }) + .map(|path| AbsolutePathBuf::resolve_path_against_base(&path, &watch_root)) .collect::>(); changed_paths.sort_by(|left, right| left.as_path().cmp(right.as_path())); if !changed_paths.is_empty() { @@ -187,10 +183,7 @@ impl FsWatchManager { } }); - Ok(FsWatchResponse { - watch_id, - path: params.path, - }) + Ok(FsWatchResponse { path: params.path }) } pub(crate) async fn unwatch( @@ -228,7 +221,6 @@ mod tests { use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use tempfile::TempDir; - use uuid::Version; fn absolute_path(path: PathBuf) -> AbsolutePathBuf { assert!( @@ -249,28 +241,33 @@ mod tests { } #[tokio::test] - async fn watch_returns_a_v7_id_and_tracks_the_owner_scoped_entry() { + async fn watch_uses_client_id_and_tracks_the_owner_scoped_entry() { let temp_dir = TempDir::new().expect("temp dir"); let head_path = temp_dir.path().join("HEAD"); std::fs::write(&head_path, "ref: refs/heads/main\n").expect("write HEAD"); let manager = manager_with_noop_watcher(); let path = absolute_path(head_path); + let watch_id = "watch-head".to_string(); let response = manager - .watch(ConnectionId(1), FsWatchParams { path: path.clone() }) + .watch( + ConnectionId(1), + FsWatchParams { + watch_id: watch_id.clone(), + path: path.clone(), + }, + ) .await .expect("watch should succeed"); assert_eq!(response.path, path); - let watch_id = Uuid::parse_str(&response.watch_id).expect("watch id should be a UUID"); - assert_eq!(watch_id.get_version(), Some(Version::SortRand)); let state = manager.state.lock().await; assert_eq!( state.entries.keys().cloned().collect::>(), HashSet::from([WatchKey { connection_id: ConnectionId(1), - watch_id: response.watch_id, + watch_id, }]) ); } @@ -282,10 +279,11 @@ mod tests { std::fs::write(&head_path, "ref: refs/heads/main\n").expect("write HEAD"); let manager = manager_with_noop_watcher(); - let response = manager + manager .watch( ConnectionId(1), FsWatchParams { + watch_id: "watch-head".to_string(), 
path: absolute_path(head_path), }, ) @@ -293,14 +291,14 @@ mod tests { .expect("watch should succeed"); let watch_key = WatchKey { connection_id: ConnectionId(1), - watch_id: response.watch_id.clone(), + watch_id: "watch-head".to_string(), }; manager .unwatch( ConnectionId(2), FsUnwatchParams { - watch_id: response.watch_id.clone(), + watch_id: "watch-head".to_string(), }, ) .await @@ -311,7 +309,7 @@ mod tests { .unwatch( ConnectionId(1), FsUnwatchParams { - watch_id: response.watch_id, + watch_id: "watch-head".to_string(), }, ) .await @@ -319,6 +317,41 @@ mod tests { assert!(!manager.state.lock().await.entries.contains_key(&watch_key)); } + #[tokio::test] + async fn watch_rejects_duplicate_id_for_the_same_connection() { + let temp_dir = TempDir::new().expect("temp dir"); + let head_path = temp_dir.path().join("HEAD"); + let fetch_head_path = temp_dir.path().join("FETCH_HEAD"); + std::fs::write(&head_path, "ref: refs/heads/main\n").expect("write HEAD"); + std::fs::write(&fetch_head_path, "old-fetch\n").expect("write FETCH_HEAD"); + + let manager = manager_with_noop_watcher(); + manager + .watch( + ConnectionId(1), + FsWatchParams { + watch_id: "watch-head".to_string(), + path: absolute_path(head_path), + }, + ) + .await + .expect("first watch should succeed"); + + let error = manager + .watch( + ConnectionId(1), + FsWatchParams { + watch_id: "watch-head".to_string(), + path: absolute_path(fetch_head_path), + }, + ) + .await + .expect_err("duplicate watch should fail"); + + assert_eq!(error.message, "watchId already exists: watch-head"); + assert_eq!(manager.state.lock().await.entries.len(), 1); + } + #[tokio::test] async fn connection_closed_removes_only_that_connections_watches() { let temp_dir = TempDir::new().expect("temp dir"); @@ -330,28 +363,31 @@ mod tests { std::fs::write(&packed_refs_path, "refs\n").expect("write packed-refs"); let manager = manager_with_noop_watcher(); - let response_1 = manager + let response = manager .watch( ConnectionId(1), 
FsWatchParams { - path: absolute_path(head_path), + watch_id: "watch-head".to_string(), + path: absolute_path(head_path.clone()), }, ) .await .expect("first watch should succeed"); - let response_2 = manager + manager .watch( ConnectionId(1), FsWatchParams { + watch_id: "watch-fetch-head".to_string(), path: absolute_path(fetch_head_path), }, ) .await .expect("second watch should succeed"); - let response_3 = manager + manager .watch( ConnectionId(2), FsWatchParams { + watch_id: "watch-packed-refs".to_string(), path: absolute_path(packed_refs_path), }, ) @@ -371,9 +407,9 @@ mod tests { .collect::>(), HashSet::from([WatchKey { connection_id: ConnectionId(2), - watch_id: response_3.watch_id, + watch_id: "watch-packed-refs".to_string(), }]) ); - assert_ne!(response_1.watch_id, response_2.watch_id); + assert_eq!(response.path, absolute_path(head_path)); } } diff --git a/codex-rs/app-server/src/in_process.rs b/codex-rs/app-server/src/in_process.rs index eda2c75da8..7bddd8a714 100644 --- a/codex-rs/app-server/src/in_process.rs +++ b/codex-rs/app-server/src/in_process.rs @@ -80,6 +80,7 @@ use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; +use codex_login::AuthManager; use codex_protocol::protocol::SessionSource; use tokio::sync::mpsc; use tokio::sync::oneshot; @@ -379,6 +380,8 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { }); let processor_outgoing = Arc::clone(&outgoing_message_sender); + let auth_manager = + AuthManager::shared_from_config(args.config.as_ref(), args.enable_codex_api_key_env); let (processor_tx, mut processor_rx) = mpsc::channel::(channel_capacity); let mut processor_handle = tokio::spawn(async move { let mut processor = MessageProcessor::new(MessageProcessorArgs { @@ -393,8 +396,9 @@ fn start_uninitialized(args: InProcessStartArgs) -> InProcessClientHandle { log_db: None, config_warnings: 
args.config_warnings, session_source: args.session_source, - enable_codex_api_key_env: args.enable_codex_api_key_env, + auth_manager, rpc_transport: AppServerRpcTransport::InProcess, + remote_control_handle: None, }); let mut thread_created_rx = processor.thread_created_receiver(); let mut session = ConnectionSessionState::default(); @@ -823,6 +827,9 @@ mod tests { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }) )); diff --git a/codex-rs/app-server/src/lib.rs b/codex-rs/app-server/src/lib.rs index 8c3742adcc..a837d9c754 100644 --- a/codex-rs/app-server/src/lib.rs +++ b/codex-rs/app-server/src/lib.rs @@ -7,6 +7,8 @@ use codex_core::config::ConfigBuilder; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::ConfigLayerStackOrdering; use codex_core::config_loader::LoaderOverrides; +use codex_features::Feature; +use codex_login::AuthManager; use codex_utils_cli::CliConfigOverrides; use std::collections::HashMap; use std::collections::HashSet; @@ -28,6 +30,7 @@ use crate::transport::OutboundConnectionState; use crate::transport::TransportEvent; use crate::transport::auth::policy_from_settings; use crate::transport::route_outgoing_envelope; +use crate::transport::start_remote_control; use crate::transport::start_stdio_connection; use crate::transport::start_websocket_acceptor; use codex_analytics::AppServerRpcTransport; @@ -42,10 +45,10 @@ use codex_core::config_loader::ConfigLoadError; use codex_core::config_loader::TextRange as CoreTextRange; use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; -use codex_login::AuthManager; use codex_protocol::protocol::SessionSource; use codex_state::log_db; use tokio::sync::mpsc; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use toml::Value as TomlValue; @@ -396,11 +399,8 @@ pub async fn run_main_with_transport( } } - let auth_manager = 
AuthManager::shared( - config.codex_home.clone(), - /*enable_codex_api_key_env*/ false, - config.cli_auth_credentials_store_mode, - ); + let auth_manager = + AuthManager::shared_from_config(&config, /*enable_codex_api_key_env*/ false); cloud_requirements_loader( auth_manager, config.chatgpt_base_url, @@ -502,13 +502,13 @@ pub async fn run_main_with_transport( let feedback_layer = feedback.logger_layer(); let feedback_metadata_layer = feedback.metadata_layer(); - let log_db = codex_state::StateRuntime::init( + let state_db = codex_state::StateRuntime::init( config.sqlite_home.clone(), config.model_provider_id.clone(), ) .await - .ok() - .map(log_db::start); + .ok(); + let log_db = state_db.clone().map(log_db::start); let log_db_layer = log_db .clone() .map(|layer| layer.with_filter(Targets::new().with_default(Level::TRACE))); @@ -535,11 +535,18 @@ pub async fn run_main_with_transport( let single_client_mode = matches!(&transport, AppServerTransport::Stdio); let shutdown_when_no_connections = single_client_mode; let graceful_signal_restart_enabled = !single_client_mode; + let mut app_server_client_name_rx = None; match transport { AppServerTransport::Stdio => { - start_stdio_connection(transport_event_tx.clone(), &mut transport_accept_handles) - .await?; + let (stdio_client_name_tx, stdio_client_name_rx) = oneshot::channel::(); + app_server_client_name_rx = Some(stdio_client_name_rx); + start_stdio_connection( + transport_event_tx.clone(), + &mut transport_accept_handles, + stdio_client_name_tx, + ) + .await?; } AppServerTransport::WebSocket { bind_address } => { let accept_handle = start_websocket_acceptor( @@ -551,8 +558,32 @@ pub async fn run_main_with_transport( .await?; transport_accept_handles.push(accept_handle); } + AppServerTransport::Off => {} } + let auth_manager = + AuthManager::shared_from_config(&config, /*enable_codex_api_key_env*/ false); + + let remote_control_enabled = config.features.enabled(Feature::RemoteControl); + if 
transport_accept_handles.is_empty() && !remote_control_enabled { + return Err(std::io::Error::new( + ErrorKind::InvalidInput, + "no transport configured; use --listen or enable remote control", + )); + } + + let (remote_control_accept_handle, remote_control_handle) = start_remote_control( + config.chatgpt_base_url.clone(), + state_db.clone(), + auth_manager.clone(), + transport_event_tx.clone(), + transport_shutdown_token.clone(), + app_server_client_name_rx, + remote_control_enabled, + ) + .await?; + transport_accept_handles.push(remote_control_accept_handle); + let outbound_handle = tokio::spawn(async move { let mut outbound_connections = HashMap::::new(); loop { @@ -611,6 +642,8 @@ pub async fn run_main_with_transport( let processor_handle = tokio::spawn({ let outgoing_message_sender = Arc::new(OutgoingMessageSender::new(outgoing_tx)); let outbound_control_tx = outbound_control_tx; + let auth_manager = + AuthManager::shared_from_config(&config, /*enable_codex_api_key_env*/ false); let cli_overrides: Vec<(String, TomlValue)> = cli_kv_overrides.clone(); let loader_overrides = loader_overrides_for_config_api; let mut processor = MessageProcessor::new(MessageProcessorArgs { @@ -625,8 +658,9 @@ pub async fn run_main_with_transport( log_db, config_warnings, session_source, - enable_codex_api_key_env: false, + auth_manager, rpc_transport: analytics_rpc_transport(transport), + remote_control_handle: Some(remote_control_handle), }); let mut thread_created_rx = processor.thread_created_receiver(); let mut running_turn_count_rx = processor.subscribe_running_assistant_turn_count(); @@ -853,7 +887,9 @@ pub async fn run_main_with_transport( fn analytics_rpc_transport(transport: AppServerTransport) -> AppServerRpcTransport { match transport { AppServerTransport::Stdio => AppServerRpcTransport::Stdio, - AppServerTransport::WebSocket { .. } => AppServerRpcTransport::Websocket, + AppServerTransport::WebSocket { .. 
} | AppServerTransport::Off => { + AppServerRpcTransport::Websocket + } } } diff --git a/codex-rs/app-server/src/main.rs b/codex-rs/app-server/src/main.rs index fa95f973ea..9a23680fb9 100644 --- a/codex-rs/app-server/src/main.rs +++ b/codex-rs/app-server/src/main.rs @@ -16,7 +16,7 @@ const MANAGED_CONFIG_PATH_ENV_VAR: &str = "CODEX_APP_SERVER_MANAGED_CONFIG_PATH" #[derive(Debug, Parser)] struct AppServerArgs { /// Transport endpoint URL. Supported values: `stdio://` (default), - /// `ws://IP:PORT`. + /// `ws://IP:PORT`, `off`. #[arg( long = "listen", value_name = "URL", diff --git a/codex-rs/app-server/src/message_processor.rs b/codex-rs/app-server/src/message_processor.rs index 02b948bc1f..fbc1bd6d10 100644 --- a/codex-rs/app-server/src/message_processor.rs +++ b/codex-rs/app-server/src/message_processor.rs @@ -19,6 +19,7 @@ use crate::outgoing_message::ConnectionRequestId; use crate::outgoing_message::OutgoingMessageSender; use crate::outgoing_message::RequestContext; use crate::transport::AppServerTransport; +use crate::transport::RemoteControlHandle; use async_trait::async_trait; use codex_analytics::AnalyticsEventsClient; use codex_analytics::AppServerRpcTransport; @@ -170,6 +171,7 @@ pub(crate) struct MessageProcessor { config: Arc, config_warnings: Arc>, rpc_transport: AppServerRpcTransport, + remote_control_handle: Option, } #[derive(Clone, Debug, Default)] @@ -193,8 +195,9 @@ pub(crate) struct MessageProcessorArgs { pub(crate) log_db: Option, pub(crate) config_warnings: Vec, pub(crate) session_source: SessionSource, - pub(crate) enable_codex_api_key_env: bool, + pub(crate) auth_manager: Arc, pub(crate) rpc_transport: AppServerRpcTransport, + pub(crate) remote_control_handle: Option, } impl MessageProcessor { @@ -213,17 +216,13 @@ impl MessageProcessor { log_db, config_warnings, session_source, - enable_codex_api_key_env, + auth_manager, rpc_transport, + remote_control_handle, } = args; - let auth_manager = AuthManager::shared_with_external_auth( - 
config.codex_home.clone(), - enable_codex_api_key_env, - config.cli_auth_credentials_store_mode, - Arc::new(ExternalAuthRefreshBridge { - outgoing: outgoing.clone(), - }), - ); + auth_manager.set_external_auth(Arc::new(ExternalAuthRefreshBridge { + outgoing: outgoing.clone(), + })); let thread_manager = Arc::new(ThreadManager::new( config.as_ref(), auth_manager.clone(), @@ -235,7 +234,6 @@ impl MessageProcessor { }, environment_manager, )); - auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id.clone()); let analytics_events_client = AnalyticsEventsClient::new( Arc::clone(&auth_manager), config.chatgpt_base_url.trim_end_matches('/').to_string(), @@ -291,6 +289,7 @@ impl MessageProcessor { config, config_warnings: Arc::new(config_warnings), rpc_transport, + remote_control_handle, } } @@ -871,16 +870,8 @@ impl MessageProcessor { request_id: ConnectionRequestId, params: ConfigValueWriteParams, ) { - match self.config_api.write_value(params).await { - Ok(response) => { - self.codex_message_processor.clear_plugin_related_caches(); - self.codex_message_processor - .maybe_start_plugin_startup_tasks_for_latest_config() - .await; - self.outgoing.send_response(request_id, response).await; - } - Err(error) => self.outgoing.send_error(request_id, error).await, - } + let result = self.config_api.write_value(params).await; + self.handle_config_mutation_result(request_id, result).await } async fn handle_config_batch_write( @@ -888,8 +879,8 @@ impl MessageProcessor { request_id: ConnectionRequestId, params: ConfigBatchWriteParams, ) { - self.handle_config_mutation_result(request_id, self.config_api.batch_write(params).await) - .await; + let result = self.config_api.batch_write(params).await; + self.handle_config_mutation_result(request_id, result).await; } async fn handle_experimental_feature_enablement_set( @@ -898,23 +889,15 @@ impl MessageProcessor { params: ExperimentalFeatureEnablementSetParams, ) { let should_refresh_apps_list = 
params.enablement.get("apps").copied() == Some(true); - match self + let result = self .config_api .set_experimental_feature_enablement(params) - .await - { - Ok(response) => { - self.codex_message_processor.clear_plugin_related_caches(); - self.codex_message_processor - .maybe_start_plugin_startup_tasks_for_latest_config() - .await; - self.outgoing.send_response(request_id, response).await; - if should_refresh_apps_list { - self.refresh_apps_list_after_experimental_feature_enablement_set() - .await; - } - } - Err(error) => self.outgoing.send_error(request_id, error).await, + .await; + let is_ok = result.is_ok(); + self.handle_config_mutation_result(request_id, result).await; + if should_refresh_apps_list && is_ok { + self.refresh_apps_list_after_experimental_feature_enablement_set() + .await; } } @@ -933,7 +916,11 @@ impl MessageProcessor { return; } }; - if !config.features.apps_enabled(Some(&self.auth_manager)).await { + let auth = self.auth_manager.auth().await; + if !config.features.apps_enabled_for_auth( + auth.as_ref() + .is_some_and(codex_login::CodexAuth::is_chatgpt_auth), + ) { return; } @@ -987,16 +974,36 @@ impl MessageProcessor { ) { match result { Ok(response) => { - self.codex_message_processor.clear_plugin_related_caches(); - self.codex_message_processor - .maybe_start_plugin_startup_tasks_for_latest_config() - .await; + self.handle_config_mutation().await; self.outgoing.send_response(request_id, response).await; } Err(error) => self.outgoing.send_error(request_id, error).await, } } + async fn handle_config_mutation(&self) { + self.codex_message_processor.handle_config_mutation(); + let Some(remote_control_handle) = &self.remote_control_handle else { + return; + }; + + match self + .config_api + .load_latest_config(/*fallback_cwd*/ None) + .await + { + Ok(config) => { + remote_control_handle.set_enabled(config.features.enabled(Feature::RemoteControl)); + } + Err(error) => { + tracing::warn!( + "failed to load config for remote control enablement 
refresh after config mutation: {}", + error.message + ); + } + } + } + async fn handle_config_requirements_read(&self, request_id: ConnectionRequestId) { match self.config_api.config_requirements_read().await { Ok(response) => self.outgoing.send_response(request_id, response).await, diff --git a/codex-rs/app-server/src/message_processor/tracing_tests.rs b/codex-rs/app-server/src/message_processor/tracing_tests.rs index d110f4feea..ef88364a3a 100644 --- a/codex-rs/app-server/src/message_processor/tracing_tests.rs +++ b/codex-rs/app-server/src/message_processor/tracing_tests.rs @@ -27,6 +27,7 @@ use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::LoaderOverrides; use codex_exec_server::EnvironmentManager; use codex_feedback::CodexFeedback; +use codex_login::AuthManager; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::W3cTraceContext; use opentelemetry::global; @@ -234,6 +235,8 @@ fn build_test_processor( ) { let (outgoing_tx, outgoing_rx) = mpsc::channel(16); let outgoing = Arc::new(OutgoingMessageSender::new(outgoing_tx)); + let auth_manager = + AuthManager::shared_from_config(config.as_ref(), /*enable_codex_api_key_env*/ false); let processor = MessageProcessor::new(MessageProcessorArgs { outgoing, arg0_paths: Arg0DispatchPaths::default(), @@ -246,8 +249,9 @@ fn build_test_processor( log_db: None, config_warnings: Vec::new(), session_source: SessionSource::VSCode, - enable_codex_api_key_env: false, + auth_manager, rpc_transport: AppServerRpcTransport::Stdio, + remote_control_handle: None, }); (processor, outgoing_rx) } diff --git a/codex-rs/app-server/src/thread_state.rs b/codex-rs/app-server/src/thread_state.rs index be5478dd51..0fe835fc75 100644 --- a/codex-rs/app-server/src/thread_state.rs +++ b/codex-rs/app-server/src/thread_state.rs @@ -16,6 +16,7 @@ use std::sync::Weak; use tokio::sync::Mutex; use tokio::sync::mpsc; use tokio::sync::oneshot; +use tracing::error; type PendingInterruptQueue = Vec<( 
ConnectionRequestId, @@ -44,6 +45,7 @@ pub(crate) enum ThreadListenerCommand { /// Per-conversation accumulation of the latest states e.g. error message while a turn runs. #[derive(Default, Clone)] pub(crate) struct TurnSummary { + pub(crate) started_at: Option, pub(crate) file_change_started: HashSet, pub(crate) command_execution_started: HashSet, pub(crate) last_error: Option, @@ -109,13 +111,50 @@ impl ThreadState { } pub(crate) fn track_current_turn_event(&mut self, event: &EventMsg) { + if let EventMsg::TurnStarted(payload) = event { + self.turn_summary.started_at = payload.started_at; + } self.current_turn_history.handle_event(event); - if !self.current_turn_history.has_active_turn() { + if matches!(event, EventMsg::TurnAborted(_) | EventMsg::TurnComplete(_)) + && !self.current_turn_history.has_active_turn() + { self.current_turn_history.reset(); } } } +pub(crate) async fn resolve_server_request_on_thread_listener( + thread_state: &Arc>, + request_id: RequestId, +) { + let (completion_tx, completion_rx) = oneshot::channel(); + let listener_command_tx = { + let state = thread_state.lock().await; + state.listener_command_tx() + }; + let Some(listener_command_tx) = listener_command_tx else { + error!("failed to remove pending client request: thread listener is not running"); + return; + }; + + if listener_command_tx + .send(ThreadListenerCommand::ResolveServerRequest { + request_id, + completion_tx, + }) + .is_err() + { + error!( + "failed to remove pending client request: thread listener command channel is closed" + ); + return; + } + + if let Err(err) = completion_rx.await { + error!("failed to remove pending client request: {err}"); + } +} + struct ThreadEntry { state: Arc>, connection_ids: HashSet, diff --git a/codex-rs/app-server/src/transport/mod.rs b/codex-rs/app-server/src/transport/mod.rs index fa744a1af5..92383cb78f 100644 --- a/codex-rs/app-server/src/transport/mod.rs +++ b/codex-rs/app-server/src/transport/mod.rs @@ -17,6 +17,7 @@ use 
std::str::FromStr; use std::sync::Arc; use std::sync::RwLock; use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; @@ -28,9 +29,12 @@ use tracing::warn; /// plenty for an interactive CLI. pub(crate) const CHANNEL_CAPACITY: usize = 128; +mod remote_control; mod stdio; mod websocket; +pub(crate) use remote_control::RemoteControlHandle; +pub(crate) use remote_control::start_remote_control; pub(crate) use stdio::start_stdio_connection; pub(crate) use websocket::start_websocket_acceptor; @@ -38,6 +42,7 @@ pub(crate) use websocket::start_websocket_acceptor; pub enum AppServerTransport { Stdio, WebSocket { bind_address: SocketAddr }, + Off, } #[derive(Debug, Clone, Eq, PartialEq)] @@ -51,7 +56,7 @@ impl std::fmt::Display for AppServerTransportParseError { match self { AppServerTransportParseError::UnsupportedListenUrl(listen_url) => write!( f, - "unsupported --listen URL `{listen_url}`; expected `stdio://` or `ws://IP:PORT`" + "unsupported --listen URL `{listen_url}`; expected `stdio://`, `ws://IP:PORT`, or `off`" ), AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url) => write!( f, @@ -71,6 +76,10 @@ impl AppServerTransport { return Ok(Self::Stdio); } + if listen_url == "off" { + return Ok(Self::Off); + } + if let Some(socket_addr) = listen_url.strip_prefix("ws://") { let bind_address = socket_addr.parse::().map_err(|_| { AppServerTransportParseError::InvalidWebSocketListenUrl(listen_url.to_string()) @@ -166,6 +175,12 @@ impl OutboundConnectionState { } } +static CONNECTION_ID_COUNTER: AtomicU64 = AtomicU64::new(0); + +fn next_connection_id() -> ConnectionId { + ConnectionId(CONNECTION_ID_COUNTER.fetch_add(1, Ordering::Relaxed)) +} + async fn forward_incoming_message( transport_event_tx: &mpsc::Sender, writer: &mpsc::Sender, @@ -378,8 +393,11 @@ pub(crate) async fn route_outgoing_envelope( #[cfg(test)] mod tests { use super::*; - use 
crate::error_code::OVERLOADED_ERROR_CODE; use codex_app_server_protocol::ConfigWarningNotification; + use codex_app_server_protocol::JSONRPCNotification; + use codex_app_server_protocol::JSONRPCRequest; + use codex_app_server_protocol::JSONRPCResponse; + use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ServerNotification; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; @@ -393,41 +411,10 @@ mod tests { } #[test] - fn app_server_transport_parses_stdio_listen_url() { - let transport = AppServerTransport::from_listen_url(AppServerTransport::DEFAULT_LISTEN_URL) - .expect("stdio listen URL should parse"); - assert_eq!(transport, AppServerTransport::Stdio); - } - - #[test] - fn app_server_transport_parses_websocket_listen_url() { - let transport = AppServerTransport::from_listen_url("ws://127.0.0.1:1234") - .expect("websocket listen URL should parse"); + fn listen_off_parses_as_off_transport() { assert_eq!( - transport, - AppServerTransport::WebSocket { - bind_address: "127.0.0.1:1234".parse().expect("valid socket address"), - } - ); - } - - #[test] - fn app_server_transport_rejects_invalid_websocket_listen_url() { - let err = AppServerTransport::from_listen_url("ws://localhost:1234") - .expect_err("hostname bind address should be rejected"); - assert_eq!( - err.to_string(), - "invalid websocket --listen URL `ws://localhost:1234`; expected `ws://IP:PORT`" - ); - } - - #[test] - fn app_server_transport_rejects_unsupported_listen_url() { - let err = AppServerTransport::from_listen_url("http://127.0.0.1:1234") - .expect_err("unsupported scheme should fail"); - assert_eq!( - err.to_string(), - "unsupported --listen URL `http://127.0.0.1:1234`; expected `stdio://` or `ws://IP:PORT`" + AppServerTransport::from_listen_url("off"), + Ok(AppServerTransport::Off) ); } @@ -437,11 +424,10 @@ mod tests { let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1); let (writer_tx, mut writer_rx) = mpsc::channel(1); - let 
first_message = - JSONRPCMessage::Notification(codex_app_server_protocol::JSONRPCNotification { - method: "initialized".to_string(), - params: None, - }); + let first_message = JSONRPCMessage::Notification(JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }); transport_event_tx .send(TransportEvent::IncomingMessage { connection_id, @@ -450,8 +436,8 @@ mod tests { .await .expect("queue should accept first message"); - let request = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { - id: codex_app_server_protocol::RequestId::Integer(7), + let request = JSONRPCMessage::Request(JSONRPCRequest { + id: RequestId::Integer(7), method: "config/read".to_string(), params: Some(json!({ "includeLayers": false })), trace: None, @@ -499,11 +485,10 @@ mod tests { let (transport_event_tx, mut transport_event_rx) = mpsc::channel(1); let (writer_tx, _writer_rx) = mpsc::channel(1); - let first_message = - JSONRPCMessage::Notification(codex_app_server_protocol::JSONRPCNotification { - method: "initialized".to_string(), - params: None, - }); + let first_message = JSONRPCMessage::Notification(JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }); transport_event_tx .send(TransportEvent::IncomingMessage { connection_id, @@ -512,8 +497,8 @@ mod tests { .await .expect("queue should accept first message"); - let response = JSONRPCMessage::Response(codex_app_server_protocol::JSONRPCResponse { - id: codex_app_server_protocol::RequestId::Integer(7), + let response = JSONRPCMessage::Response(JSONRPCResponse { + id: RequestId::Integer(7), result: json!({"ok": true}), }); let transport_event_tx_for_enqueue = transport_event_tx.clone(); @@ -553,11 +538,10 @@ mod tests { match forwarded_event { TransportEvent::IncomingMessage { connection_id: queued_connection_id, - message: - JSONRPCMessage::Response(codex_app_server_protocol::JSONRPCResponse { id, result }), + message: JSONRPCMessage::Response(JSONRPCResponse { id, result }), } 
=> { assert_eq!(queued_connection_id, connection_id); - assert_eq!(id, codex_app_server_protocol::RequestId::Integer(7)); + assert_eq!(id, RequestId::Integer(7)); assert_eq!(result, json!({"ok": true})); } _ => panic!("expected forwarded response message"), @@ -573,12 +557,10 @@ mod tests { transport_event_tx .send(TransportEvent::IncomingMessage { connection_id, - message: JSONRPCMessage::Notification( - codex_app_server_protocol::JSONRPCNotification { - method: "initialized".to_string(), - params: None, - }, - ), + message: JSONRPCMessage::Notification(JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }), }) .await .expect("transport queue should accept first message"); @@ -597,15 +579,15 @@ mod tests { .await .expect("writer queue should accept first message"); - let request = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { - id: codex_app_server_protocol::RequestId::Integer(7), + let request = JSONRPCMessage::Request(JSONRPCRequest { + id: RequestId::Integer(7), method: "config/read".to_string(), params: Some(json!({ "includeLayers": false })), trace: None, }); - let enqueue_result = tokio::time::timeout( - std::time::Duration::from_millis(100), + let enqueue_result = timeout( + Duration::from_millis(100), enqueue_incoming_message(&transport_event_tx, &writer_tx, connection_id, request), ) .await @@ -781,7 +763,7 @@ mod tests { OutgoingEnvelope::ToConnection { connection_id, message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval { - request_id: codex_app_server_protocol::RequestId::Integer(1), + request_id: RequestId::Integer(1), params: codex_app_server_protocol::CommandExecutionRequestApprovalParams { thread_id: "thr_123".to_string(), turn_id: "turn_123".to_string(), @@ -843,7 +825,7 @@ mod tests { OutgoingEnvelope::ToConnection { connection_id, message: OutgoingMessage::Request(ServerRequest::CommandExecutionRequestApproval { - request_id: 
codex_app_server_protocol::RequestId::Integer(1), + request_id: RequestId::Integer(1), params: codex_app_server_protocol::CommandExecutionRequestApprovalParams { thread_id: "thr_123".to_string(), turn_id: "turn_123".to_string(), diff --git a/codex-rs/app-server/src/transport/remote_control/client_tracker.rs b/codex-rs/app-server/src/transport/remote_control/client_tracker.rs new file mode 100644 index 0000000000..fa9a208ade --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/client_tracker.rs @@ -0,0 +1,568 @@ +use super::CHANNEL_CAPACITY; +use super::TransportEvent; +use super::next_connection_id; +use super::protocol::ClientEnvelope; +pub use super::protocol::ClientEvent; +pub use super::protocol::ClientId; +use super::protocol::PongStatus; +use super::protocol::ServerEvent; +use super::protocol::StreamId; +use crate::outgoing_message::ConnectionId; +use crate::outgoing_message::QueuedOutgoingMessage; +use crate::transport::remote_control::QueuedServerEnvelope; +use codex_app_server_protocol::JSONRPCMessage; +use std::collections::HashMap; +use tokio::sync::mpsc; +use tokio::sync::watch; +use tokio::task::JoinSet; +use tokio::time::Duration; +use tokio::time::Instant; +use tokio_util::sync::CancellationToken; + +const REMOTE_CONTROL_CLIENT_IDLE_TIMEOUT: Duration = Duration::from_secs(10 * 60); +pub(crate) const REMOTE_CONTROL_IDLE_SWEEP_INTERVAL: Duration = Duration::from_secs(30); + +#[derive(Debug)] +pub(crate) struct Stopped; + +struct ClientState { + connection_id: ConnectionId, + disconnect_token: CancellationToken, + last_activity_at: Instant, + last_inbound_seq_id: Option, + status_tx: watch::Sender, +} + +pub(crate) struct ClientTracker { + clients: HashMap<(ClientId, StreamId), ClientState>, + legacy_stream_ids: HashMap, + join_set: JoinSet<(ClientId, StreamId)>, + server_event_tx: mpsc::Sender, + transport_event_tx: mpsc::Sender, + shutdown_token: CancellationToken, +} + +impl ClientTracker { + pub(crate) fn new( + server_event_tx: 
mpsc::Sender, + transport_event_tx: mpsc::Sender, + shutdown_token: &CancellationToken, + ) -> Self { + Self { + clients: HashMap::new(), + legacy_stream_ids: HashMap::new(), + join_set: JoinSet::new(), + server_event_tx, + transport_event_tx, + shutdown_token: shutdown_token.child_token(), + } + } + + pub(crate) async fn bookkeep_join_set(&mut self) -> Option<(ClientId, StreamId)> { + while let Some(join_result) = self.join_set.join_next().await { + let Ok(client_key) = join_result else { + continue; + }; + return Some(client_key); + } + futures::future::pending().await + } + + pub(crate) async fn shutdown(&mut self) { + self.shutdown_token.cancel(); + + while let Some(client_key) = self.clients.keys().next().cloned() { + let _ = self.close_client(&client_key).await; + } + + self.drain_join_set().await; + } + + async fn drain_join_set(&mut self) { + while self.join_set.join_next().await.is_some() {} + } + + pub(crate) async fn handle_message( + &mut self, + client_envelope: ClientEnvelope, + ) -> Result<(), Stopped> { + let ClientEnvelope { + client_id, + event, + stream_id, + seq_id, + cursor: _, + } = client_envelope; + let is_legacy_stream_id = stream_id.is_none(); + let is_initialize = matches!(&event, ClientEvent::ClientMessage { message } if remote_control_message_starts_connection(message)); + let stream_id = match stream_id { + Some(stream_id) => stream_id, + None if is_initialize => { + // TODO(ruslan): delete this fallback once all clients are updated to send stream_id. 
+ self.legacy_stream_ids + .remove(&client_id) + .unwrap_or_else(StreamId::new_random) + } + None => self + .legacy_stream_ids + .get(&client_id) + .cloned() + .unwrap_or_else(|| { + if matches!(&event, ClientEvent::Ping) { + StreamId::new_random() + } else { + StreamId(String::new()) + } + }), + }; + if stream_id.0.is_empty() { + return Ok(()); + } + let client_key = (client_id.clone(), stream_id.clone()); + match event { + ClientEvent::ClientMessage { message } => { + if let Some(seq_id) = seq_id + && let Some(client) = self.clients.get(&client_key) + && client + .last_inbound_seq_id + .is_some_and(|last_seq_id| last_seq_id >= seq_id) + && !is_initialize + { + return Ok(()); + } + + if is_initialize && self.clients.contains_key(&client_key) { + self.close_client(&client_key).await?; + } + + if let Some(connection_id) = self.clients.get_mut(&client_key).map(|client| { + client.last_activity_at = Instant::now(); + if let Some(seq_id) = seq_id { + client.last_inbound_seq_id = Some(seq_id); + } + client.connection_id + }) { + self.send_transport_event(TransportEvent::IncomingMessage { + connection_id, + message, + }) + .await?; + return Ok(()); + } + + if !is_initialize { + return Ok(()); + } + + let connection_id = next_connection_id(); + let (writer_tx, writer_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let disconnect_token = self.shutdown_token.child_token(); + self.send_transport_event(TransportEvent::ConnectionOpened { + connection_id, + writer: writer_tx, + disconnect_sender: Some(disconnect_token.clone()), + }) + .await?; + + let (status_tx, status_rx) = watch::channel(PongStatus::Active); + self.join_set.spawn(Self::run_client_outbound( + client_id.clone(), + stream_id.clone(), + self.server_event_tx.clone(), + writer_rx, + status_rx, + disconnect_token.clone(), + )); + self.clients.insert( + client_key, + ClientState { + connection_id, + disconnect_token, + last_activity_at: Instant::now(), + last_inbound_seq_id: if is_legacy_stream_id { None } else { 
seq_id }, + status_tx, + }, + ); + if is_legacy_stream_id { + self.legacy_stream_ids.insert(client_id.clone(), stream_id); + } + self.send_transport_event(TransportEvent::IncomingMessage { + connection_id, + message, + }) + .await + } + ClientEvent::Ack => Ok(()), + ClientEvent::Ping => { + if let Some(client) = self.clients.get_mut(&client_key) { + client.last_activity_at = Instant::now(); + let _ = client.status_tx.send(PongStatus::Active); + return Ok(()); + } + + let server_event_tx = self.server_event_tx.clone(); + tokio::spawn(async move { + let server_envelope = QueuedServerEnvelope { + event: ServerEvent::Pong { + status: PongStatus::Unknown, + }, + client_id, + stream_id, + write_complete_tx: None, + }; + let _ = server_event_tx.send(server_envelope).await; + }); + Ok(()) + } + ClientEvent::ClientClosed => self.close_client(&client_key).await, + } + } + + async fn run_client_outbound( + client_id: ClientId, + stream_id: StreamId, + server_event_tx: mpsc::Sender, + mut writer_rx: mpsc::Receiver, + mut status_rx: watch::Receiver, + disconnect_token: CancellationToken, + ) -> (ClientId, StreamId) { + loop { + let (event, write_complete_tx) = tokio::select! { + _ = disconnect_token.cancelled() => { + break; + } + queued_message = writer_rx.recv() => { + let Some(queued_message) = queued_message else { + break; + }; + let event = ServerEvent::ServerMessage { + message: Box::new(queued_message.message), + }; + (event, queued_message.write_complete_tx) + } + changed = status_rx.changed() => { + if changed.is_err() { + break; + } + let event = ServerEvent::Pong { status: status_rx.borrow().clone() }; + (event, None) + } + }; + let send_result = tokio::select! 
{ + _ = disconnect_token.cancelled() => { + break; + } + send_result = server_event_tx.send(QueuedServerEnvelope { + event, + client_id: client_id.clone(), + stream_id: stream_id.clone(), + write_complete_tx, + }) => send_result, + }; + if send_result.is_err() { + break; + } + } + (client_id, stream_id) + } + + pub(crate) async fn close_expired_clients( + &mut self, + ) -> Result, Stopped> { + let now = Instant::now(); + let expired_client_ids: Vec<(ClientId, StreamId)> = self + .clients + .iter() + .filter_map(|(client_key, client)| { + (!remote_control_client_is_alive(client, now)).then_some(client_key.clone()) + }) + .collect(); + for client_key in &expired_client_ids { + self.close_client(client_key).await?; + } + Ok(expired_client_ids) + } + + pub(super) async fn close_client( + &mut self, + client_key: &(ClientId, StreamId), + ) -> Result<(), Stopped> { + let Some(client) = self.clients.remove(client_key) else { + return Ok(()); + }; + if self + .legacy_stream_ids + .get(&client_key.0) + .is_some_and(|stream_id| stream_id == &client_key.1) + { + self.legacy_stream_ids.remove(&client_key.0); + } + client.disconnect_token.cancel(); + self.send_transport_event(TransportEvent::ConnectionClosed { + connection_id: client.connection_id, + }) + .await + } + + async fn send_transport_event(&self, event: TransportEvent) -> Result<(), Stopped> { + self.transport_event_tx + .send(event) + .await + .map_err(|_| Stopped) + } +} + +fn remote_control_message_starts_connection(message: &JSONRPCMessage) -> bool { + matches!( + message, + JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { method, .. 
}) + if method == "initialize" + ) +} + +fn remote_control_client_is_alive(client: &ClientState, now: Instant) -> bool { + now.duration_since(client.last_activity_at) < REMOTE_CONTROL_CLIENT_IDLE_TIMEOUT +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::outgoing_message::OutgoingMessage; + use crate::transport::remote_control::protocol::ClientEnvelope; + use crate::transport::remote_control::protocol::ClientEvent; + use codex_app_server_protocol::ConfigWarningNotification; + use codex_app_server_protocol::JSONRPCRequest; + use codex_app_server_protocol::RequestId; + use codex_app_server_protocol::ServerNotification; + use pretty_assertions::assert_eq; + use serde_json::json; + use tokio::time::timeout; + + fn initialize_envelope(client_id: &str) -> ClientEnvelope { + initialize_envelope_with_stream_id(client_id, /*stream_id*/ None) + } + + fn initialize_envelope_with_stream_id( + client_id: &str, + stream_id: Option<&str>, + ) -> ClientEnvelope { + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: JSONRPCMessage::Request(JSONRPCRequest { + id: RequestId::Integer(1), + method: "initialize".to_string(), + params: Some(json!({ + "clientInfo": { + "name": "remote-test-client", + "version": "0.1.0" + } + })), + trace: None, + }), + }, + client_id: ClientId(client_id.to_string()), + stream_id: stream_id.map(|stream_id| StreamId(stream_id.to_string())), + seq_id: Some(0), + cursor: None, + } + } + + #[tokio::test] + async fn cancelled_outbound_task_emits_connection_closed() { + let (server_event_tx, _server_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let (transport_event_tx, mut transport_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let mut client_tracker = + ClientTracker::new(server_event_tx, transport_event_tx, &shutdown_token); + + client_tracker + .handle_message(initialize_envelope("client-1")) + .await + .expect("initialize should open client"); + + let (connection_id, 
disconnect_sender) = match transport_event_rx + .recv() + .await + .expect("connection opened should be sent") + { + TransportEvent::ConnectionOpened { + connection_id, + disconnect_sender: Some(disconnect_sender), + .. + } => (connection_id, disconnect_sender), + other => panic!("expected connection opened, got {other:?}"), + }; + match transport_event_rx + .recv() + .await + .expect("initialize should be forwarded") + { + TransportEvent::IncomingMessage { + connection_id: incoming_connection_id, + .. + } => assert_eq!(incoming_connection_id, connection_id), + other => panic!("expected incoming initialize, got {other:?}"), + } + + disconnect_sender.cancel(); + let closed_client_id = timeout(Duration::from_secs(1), client_tracker.bookkeep_join_set()) + .await + .expect("bookkeeping should process the closed task") + .expect("closed task should return client id"); + assert_eq!(closed_client_id.0, ClientId("client-1".to_string())); + client_tracker + .close_client(&closed_client_id) + .await + .expect("closed client should emit connection closed"); + + match transport_event_rx + .recv() + .await + .expect("connection closed should be sent") + { + TransportEvent::ConnectionClosed { + connection_id: closed_connection_id, + } => assert_eq!(closed_connection_id, connection_id), + other => panic!("expected connection closed, got {other:?}"), + } + } + + #[tokio::test] + async fn shutdown_cancels_blocked_outbound_forwarding() { + let (server_event_tx, _server_event_rx) = mpsc::channel(1); + let (transport_event_tx, mut transport_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let mut client_tracker = + ClientTracker::new(server_event_tx.clone(), transport_event_tx, &shutdown_token); + + server_event_tx + .send(QueuedServerEnvelope { + event: ServerEvent::Pong { + status: PongStatus::Unknown, + }, + client_id: ClientId("queued-client".to_string()), + stream_id: StreamId("queued-stream".to_string()), + write_complete_tx: None, 
+ }) + .await + .expect("server event queue should accept prefill"); + + client_tracker + .handle_message(initialize_envelope("client-1")) + .await + .expect("initialize should open client"); + + let writer = match transport_event_rx + .recv() + .await + .expect("connection opened should be sent") + { + TransportEvent::ConnectionOpened { writer, .. } => writer, + other => panic!("expected connection opened, got {other:?}"), + }; + let _ = transport_event_rx + .recv() + .await + .expect("initialize should be forwarded"); + + writer + .send(QueuedOutgoingMessage::new( + OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning( + ConfigWarningNotification { + summary: "test".to_string(), + details: None, + path: None, + range: None, + }, + )), + )) + .await + .expect("writer should accept queued message"); + + timeout(Duration::from_secs(1), client_tracker.shutdown()) + .await + .expect("shutdown should not hang on blocked server forwarding"); + } + + #[tokio::test] + async fn initialize_with_new_stream_id_opens_new_connection_for_same_client() { + let (server_event_tx, _server_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let (transport_event_tx, mut transport_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let mut client_tracker = + ClientTracker::new(server_event_tx, transport_event_tx, &shutdown_token); + + client_tracker + .handle_message(initialize_envelope_with_stream_id( + "client-1", + Some("stream-1"), + )) + .await + .expect("first initialize should open client"); + let first_connection_id = match transport_event_rx.recv().await.expect("open event") { + TransportEvent::ConnectionOpened { connection_id, .. 
} => connection_id, + other => panic!("expected connection opened, got {other:?}"), + }; + let _ = transport_event_rx.recv().await.expect("initialize event"); + + client_tracker + .handle_message(initialize_envelope_with_stream_id( + "client-1", + Some("stream-2"), + )) + .await + .expect("second initialize should open client"); + let second_connection_id = match transport_event_rx.recv().await.expect("open event") { + TransportEvent::ConnectionOpened { connection_id, .. } => connection_id, + other => panic!("expected connection opened, got {other:?}"), + }; + + assert_ne!(first_connection_id, second_connection_id); + } + + #[tokio::test] + async fn legacy_initialize_without_stream_id_resets_inbound_seq_id() { + let (server_event_tx, _server_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let (transport_event_tx, mut transport_event_rx) = mpsc::channel(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let mut client_tracker = + ClientTracker::new(server_event_tx, transport_event_tx, &shutdown_token); + + client_tracker + .handle_message(initialize_envelope("client-1")) + .await + .expect("initialize should open client"); + let connection_id = match transport_event_rx.recv().await.expect("open event") { + TransportEvent::ConnectionOpened { connection_id, .. 
} => connection_id, + other => panic!("expected connection opened, got {other:?}"), + }; + let _ = transport_event_rx.recv().await.expect("initialize event"); + + client_tracker + .handle_message(ClientEnvelope { + event: ClientEvent::ClientMessage { + message: JSONRPCMessage::Notification( + codex_app_server_protocol::JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }, + ), + }, + client_id: ClientId("client-1".to_string()), + stream_id: None, + seq_id: Some(0), + cursor: None, + }) + .await + .expect("legacy followup should be forwarded"); + + match transport_event_rx.recv().await.expect("followup event") { + TransportEvent::IncomingMessage { + connection_id: incoming_connection_id, + .. + } => assert_eq!(incoming_connection_id, connection_id), + other => panic!("expected incoming message, got {other:?}"), + } + } +} diff --git a/codex-rs/app-server/src/transport/remote_control/enroll.rs b/codex-rs/app-server/src/transport/remote_control/enroll.rs new file mode 100644 index 0000000000..dbe18c8355 --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/enroll.rs @@ -0,0 +1,503 @@ +use super::protocol::EnrollRemoteServerRequest; +use super::protocol::EnrollRemoteServerResponse; +use super::protocol::RemoteControlTarget; +use axum::http::HeaderMap; +use codex_login::default_client::build_reqwest_client; +use codex_state::RemoteControlEnrollmentRecord; +use codex_state::StateRuntime; +use gethostname::gethostname; +use std::io; +use std::io::ErrorKind; +use tracing::info; +use tracing::warn; + +const REMOTE_CONTROL_ENROLL_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30); +const REMOTE_CONTROL_RESPONSE_BODY_MAX_BYTES: usize = 4096; + +const REQUEST_ID_HEADER: &str = "x-request-id"; +const OAI_REQUEST_ID_HEADER: &str = "x-oai-request-id"; +const CF_RAY_HEADER: &str = "cf-ray"; +pub(super) const REMOTE_CONTROL_ACCOUNT_ID_HEADER: &str = "chatgpt-account-id"; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) 
struct RemoteControlEnrollment { + pub(super) account_id: String, + pub(super) environment_id: String, + pub(super) server_id: String, + pub(super) server_name: String, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct RemoteControlConnectionAuth { + pub(super) bearer_token: String, + pub(super) account_id: String, +} + +pub(super) async fn load_persisted_remote_control_enrollment( + state_db: Option<&StateRuntime>, + remote_control_target: &RemoteControlTarget, + account_id: &str, + app_server_client_name: Option<&str>, +) -> Option { + let Some(state_db) = state_db else { + info!( + "remote control enrollment cache unavailable because sqlite state db is disabled: websocket_url={}, account_id={}, app_server_client_name={:?}", + remote_control_target.websocket_url, account_id, app_server_client_name + ); + return None; + }; + let enrollment = match state_db + .get_remote_control_enrollment( + &remote_control_target.websocket_url, + account_id, + app_server_client_name, + ) + .await + { + Ok(enrollment) => enrollment, + Err(err) => { + warn!( + "failed to load persisted remote control enrollment: websocket_url={}, account_id={}, app_server_client_name={:?}, err={err}", + remote_control_target.websocket_url, account_id, app_server_client_name + ); + return None; + } + }; + + match enrollment { + Some(enrollment) => { + info!( + "reusing persisted remote control enrollment: websocket_url={}, account_id={}, app_server_client_name={:?}, server_id={}, environment_id={}", + remote_control_target.websocket_url, + account_id, + app_server_client_name, + enrollment.server_id, + enrollment.environment_id + ); + Some(RemoteControlEnrollment { + account_id: enrollment.account_id, + environment_id: enrollment.environment_id, + server_id: enrollment.server_id, + server_name: enrollment.server_name, + }) + } + None => { + info!( + "no persisted remote control enrollment found: websocket_url={}, account_id={}, app_server_client_name={:?}", + 
remote_control_target.websocket_url, account_id, app_server_client_name + ); + None + } + } +} + +pub(super) async fn update_persisted_remote_control_enrollment( + state_db: Option<&StateRuntime>, + remote_control_target: &RemoteControlTarget, + account_id: &str, + app_server_client_name: Option<&str>, + enrollment: Option<&RemoteControlEnrollment>, +) -> io::Result<()> { + let Some(state_db) = state_db else { + info!( + "skipping remote control enrollment persistence because sqlite state db is disabled: websocket_url={}, account_id={}, app_server_client_name={:?}, has_enrollment={}", + remote_control_target.websocket_url, + account_id, + app_server_client_name, + enrollment.is_some() + ); + return Ok(()); + }; + if let &Some(enrollment) = &enrollment + && enrollment.account_id != account_id + { + return Err(io::Error::other(format!( + "enrollment account_id does not match expected account_id `{account_id}`" + ))); + } + + if let Some(enrollment) = enrollment { + state_db + .upsert_remote_control_enrollment(&RemoteControlEnrollmentRecord { + websocket_url: remote_control_target.websocket_url.clone(), + account_id: account_id.to_string(), + app_server_client_name: app_server_client_name.map(str::to_string), + server_id: enrollment.server_id.clone(), + environment_id: enrollment.environment_id.clone(), + server_name: enrollment.server_name.clone(), + }) + .await + .map_err(io::Error::other)?; + info!( + "persisted remote control enrollment: websocket_url={}, account_id={}, app_server_client_name={:?}, server_id={}, environment_id={}", + remote_control_target.websocket_url, + account_id, + app_server_client_name, + enrollment.server_id, + enrollment.environment_id + ); + Ok(()) + } else { + let rows_affected = state_db + .delete_remote_control_enrollment( + &remote_control_target.websocket_url, + account_id, + app_server_client_name, + ) + .await + .map_err(io::Error::other)?; + info!( + "cleared persisted remote control enrollment: websocket_url={}, account_id={}, 
app_server_client_name={:?}, rows_affected={rows_affected}", + remote_control_target.websocket_url, account_id, app_server_client_name + ); + Ok(()) + } +} + +pub(crate) fn preview_remote_control_response_body(body: &[u8]) -> String { + let body = String::from_utf8_lossy(body); + let trimmed = body.trim(); + if trimmed.is_empty() { + return "".to_string(); + } + if trimmed.len() <= REMOTE_CONTROL_RESPONSE_BODY_MAX_BYTES { + return trimmed.to_string(); + } + + let mut cut = REMOTE_CONTROL_RESPONSE_BODY_MAX_BYTES; + while !trimmed.is_char_boundary(cut) { + cut = cut.saturating_sub(1); + } + let mut truncated = trimmed[..cut].to_string(); + truncated.push_str("..."); + truncated +} + +pub(crate) fn format_headers(headers: &HeaderMap) -> String { + let request_id_str = headers + .get(REQUEST_ID_HEADER) + .or_else(|| headers.get(OAI_REQUEST_ID_HEADER)) + .map(|value| value.to_str().unwrap_or("").to_owned()) + .unwrap_or_else(|| "".to_owned()); + let cf_ray_str = headers + .get(CF_RAY_HEADER) + .map(|value| value.to_str().unwrap_or("").to_owned()) + .unwrap_or_else(|| "".to_owned()); + format!("request-id: {request_id_str}, cf-ray: {cf_ray_str}") +} + +pub(super) async fn enroll_remote_control_server( + remote_control_target: &RemoteControlTarget, + auth: &RemoteControlConnectionAuth, +) -> io::Result { + let enroll_url = &remote_control_target.enroll_url; + let server_name = gethostname().to_string_lossy().trim().to_string(); + let request = EnrollRemoteServerRequest { + name: server_name.clone(), + os: std::env::consts::OS, + arch: std::env::consts::ARCH, + app_server_version: env!("CARGO_PKG_VERSION"), + }; + let client = build_reqwest_client(); + let http_request = client + .post(enroll_url) + .timeout(REMOTE_CONTROL_ENROLL_TIMEOUT) + .bearer_auth(&auth.bearer_token) + .header(REMOTE_CONTROL_ACCOUNT_ID_HEADER, &auth.account_id) + .json(&request); + + let response = http_request.send().await.map_err(|err| { + io::Error::other(format!( + "failed to enroll remote 
control server at `{enroll_url}`: {err}" + )) + })?; + let headers = response.headers().clone(); + let status = response.status(); + let body = response.bytes().await.map_err(|err| { + io::Error::other(format!( + "failed to read remote control enrollment response from `{enroll_url}`: {err}" + )) + })?; + let body_preview = preview_remote_control_response_body(&body); + if !status.is_success() { + let headers_str = format_headers(&headers); + let error_kind = if matches!(status.as_u16(), 401 | 403) { + ErrorKind::PermissionDenied + } else { + ErrorKind::Other + }; + return Err(io::Error::new( + error_kind, + format!( + "remote control server enrollment failed at `{enroll_url}`: HTTP {status}, {headers_str}, body: {body_preview}" + ), + )); + } + + let enrollment = serde_json::from_slice::(&body).map_err(|err| { + let headers_str = format_headers(&headers); + io::Error::other(format!( + "failed to parse remote control enrollment response from `{enroll_url}`: HTTP {status}, {headers_str}, body: {body_preview}, decode error: {err}" + )) + })?; + + Ok(RemoteControlEnrollment { + account_id: auth.account_id.clone(), + environment_id: enrollment.environment_id, + server_id: enrollment.server_id, + server_name, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::transport::remote_control::protocol::normalize_remote_control_url; + use codex_state::StateRuntime; + use pretty_assertions::assert_eq; + use serde_json::json; + use std::sync::Arc; + use tempfile::TempDir; + use tokio::io::AsyncBufReadExt; + use tokio::io::AsyncWriteExt; + use tokio::io::BufReader; + use tokio::net::TcpListener; + use tokio::net::TcpStream; + use tokio::time::Duration; + use tokio::time::timeout; + + async fn remote_control_state_runtime(codex_home: &TempDir) -> Arc { + StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string()) + .await + .expect("state runtime should initialize") + } + + #[tokio::test] + async fn 
#[cfg(test)]
mod tests {
    use super::*;
    use crate::transport::remote_control::protocol::normalize_remote_control_url;
    use codex_state::StateRuntime;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use std::sync::Arc;
    use tempfile::TempDir;
    use tokio::io::AsyncBufReadExt;
    use tokio::io::AsyncWriteExt;
    use tokio::io::BufReader;
    use tokio::net::TcpListener;
    use tokio::net::TcpStream;
    use tokio::time::Duration;
    use tokio::time::timeout;

    /// Fresh sqlite state runtime rooted in a temp codex home.
    async fn remote_control_state_runtime(codex_home: &TempDir) -> Arc<StateRuntime> {
        StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string())
            .await
            .expect("state runtime should initialize")
    }

    #[tokio::test]
    async fn persisted_remote_control_enrollment_round_trips_by_target_and_account() {
        let codex_home = TempDir::new().expect("temp dir should create");
        let state_db = remote_control_state_runtime(&codex_home).await;
        let first_target = normalize_remote_control_url("https://chatgpt.com/remote/control")
            .expect("first target should parse");
        let second_target =
            normalize_remote_control_url("https://api.chatgpt-staging.com/other/control")
                .expect("second target should parse");
        let first_enrollment = RemoteControlEnrollment {
            account_id: "account-a".to_string(),
            environment_id: "env_first".to_string(),
            server_id: "srv_e_first".to_string(),
            server_name: "first-server".to_string(),
        };
        let second_enrollment = RemoteControlEnrollment {
            account_id: "account-a".to_string(),
            environment_id: "env_second".to_string(),
            server_id: "srv_e_second".to_string(),
            server_name: "second-server".to_string(),
        };

        update_persisted_remote_control_enrollment(
            Some(state_db.as_ref()),
            &first_target,
            "account-a",
            Some("desktop-client"),
            Some(&first_enrollment),
        )
        .await
        .expect("first enrollment should persist");
        update_persisted_remote_control_enrollment(
            Some(state_db.as_ref()),
            &second_target,
            "account-a",
            Some("desktop-client"),
            Some(&second_enrollment),
        )
        .await
        .expect("second enrollment should persist");

        assert_eq!(
            load_persisted_remote_control_enrollment(
                Some(state_db.as_ref()),
                &first_target,
                "account-a",
                Some("desktop-client"),
            )
            .await,
            Some(first_enrollment.clone())
        );
        assert_eq!(
            load_persisted_remote_control_enrollment(
                Some(state_db.as_ref()),
                &first_target,
                "account-b",
                Some("desktop-client"),
            )
            .await,
            None
        );
        assert_eq!(
            load_persisted_remote_control_enrollment(
                Some(state_db.as_ref()),
                &second_target,
                "account-a",
                Some("desktop-client"),
            )
            .await,
            Some(second_enrollment)
        );
    }

    #[tokio::test]
    async fn clearing_persisted_remote_control_enrollment_removes_only_matching_entry() {
        let codex_home = TempDir::new().expect("temp dir should create");
        let state_db = remote_control_state_runtime(&codex_home).await;
        let first_target = normalize_remote_control_url("https://chatgpt.com/remote/control")
            .expect("first target should parse");
        let second_target =
            normalize_remote_control_url("https://api.chatgpt-staging.com/other/control")
                .expect("second target should parse");
        let first_enrollment = RemoteControlEnrollment {
            account_id: "account-a".to_string(),
            environment_id: "env_first".to_string(),
            server_id: "srv_e_first".to_string(),
            server_name: "first-server".to_string(),
        };
        let second_enrollment = RemoteControlEnrollment {
            account_id: "account-a".to_string(),
            environment_id: "env_second".to_string(),
            server_id: "srv_e_second".to_string(),
            server_name: "second-server".to_string(),
        };

        update_persisted_remote_control_enrollment(
            Some(state_db.as_ref()),
            &first_target,
            "account-a",
            /*app_server_client_name*/ None,
            Some(&first_enrollment),
        )
        .await
        .expect("first enrollment should persist");
        update_persisted_remote_control_enrollment(
            Some(state_db.as_ref()),
            &second_target,
            "account-a",
            /*app_server_client_name*/ None,
            Some(&second_enrollment),
        )
        .await
        .expect("second enrollment should persist");

        update_persisted_remote_control_enrollment(
            Some(state_db.as_ref()),
            &first_target,
            "account-a",
            /*app_server_client_name*/ None,
            /*enrollment*/ None,
        )
        .await
        .expect("matching enrollment should clear");

        assert_eq!(
            load_persisted_remote_control_enrollment(
                Some(state_db.as_ref()),
                &first_target,
                "account-a",
                /*app_server_client_name*/ None,
            )
            .await,
            None
        );
        assert_eq!(
            load_persisted_remote_control_enrollment(
                Some(state_db.as_ref()),
                &second_target,
                "account-a",
                /*app_server_client_name*/ None,
            )
            .await,
            Some(second_enrollment)
        );
    }

    #[tokio::test]
    async fn enroll_remote_control_server_parse_failure_includes_response_body() {
        let listener = TcpListener::bind("127.0.0.1:0")
            .await
            .expect("listener should bind");
        let remote_control_url = format!(
            "http://127.0.0.1:{}/backend-api/",
            listener
                .local_addr()
                .expect("listener should have a local addr")
                .port()
        );
        let remote_control_target =
            normalize_remote_control_url(&remote_control_url).expect("target should parse");
        let enroll_url = remote_control_target.enroll_url.clone();
        let response_body = json!({
            "error": "not enrolled",
        });
        let expected_body = response_body.to_string();
        let server_task = tokio::spawn(async move {
            let stream = accept_http_request(&listener).await;
            respond_with_json(stream, response_body).await;
        });

        let err = enroll_remote_control_server(
            &remote_control_target,
            &RemoteControlConnectionAuth {
                bearer_token: "Access Token".to_string(),
                account_id: "account_id".to_string(),
            },
        )
        .await
        .expect_err("invalid response should fail to parse");

        server_task.await.expect("server task should succeed");
        assert_eq!(
            err.to_string(),
            format!(
                "failed to parse remote control enrollment response from `{enroll_url}`: HTTP 200 OK, request-id: , cf-ray: , body: {expected_body}, decode error: missing field `server_id` at line 1 column {}",
                expected_body.len()
            )
        );
    }

    /// Accepts one connection and consumes the request line plus headers,
    /// returning the raw stream so the caller can write a canned response.
    async fn accept_http_request(listener: &TcpListener) -> TcpStream {
        let (stream, _) = timeout(Duration::from_secs(5), listener.accept())
            .await
            .expect("HTTP request should arrive in time")
            .expect("listener accept should succeed");
        let mut reader = BufReader::new(stream);

        let mut request_line = String::new();
        reader
            .read_line(&mut request_line)
            .await
            .expect("request line should read");
        loop {
            let mut line = String::new();
            reader
                .read_line(&mut line)
                .await
                .expect("header line should read");
            if line == "\r\n" {
                break;
            }
        }

        reader.into_inner()
    }

    /// Writes a minimal HTTP/1.1 200 response carrying `body` as JSON.
    async fn respond_with_json(mut stream: TcpStream, body: serde_json::Value) {
        let body = body.to_string();
        let response = format!(
            "HTTP/1.1 200 OK\r\ncontent-type: application/json\r\ncontent-length: {}\r\nconnection: close\r\n\r\n{body}",
            body.len()
        );
        stream
            .write_all(response.as_bytes())
            .await
            .expect("response should write");
        stream.flush().await.expect("response should flush");
    }
}
+ async fn respond_with_json(mut stream: TcpStream, body: serde_json::Value) { + let body = body.to_string(); + let response = format!( + "HTTP/1.1 200 OK\r\ncontent-type: application/json\r\ncontent-length: {}\r\nconnection: close\r\n\r\n{body}", + body.len() + ); + stream + .write_all(response.as_bytes()) + .await + .expect("response should write"); + stream.flush().await.expect("response should flush"); + } +} diff --git a/codex-rs/app-server/src/transport/remote_control/mod.rs b/codex-rs/app-server/src/transport/remote_control/mod.rs new file mode 100644 index 0000000000..1ea89bb643 --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/mod.rs @@ -0,0 +1,100 @@ +mod client_tracker; +mod enroll; +mod protocol; +mod websocket; + +use crate::transport::remote_control::websocket::RemoteControlWebsocket; +use crate::transport::remote_control::websocket::load_remote_control_auth; + +pub use self::protocol::ClientId; +use self::protocol::ServerEvent; +use self::protocol::StreamId; +use self::protocol::normalize_remote_control_url; +use super::CHANNEL_CAPACITY; +use super::TransportEvent; +use super::next_connection_id; +use codex_login::AuthManager; +use codex_state::StateRuntime; +use std::io; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tokio::sync::watch; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +pub(super) struct QueuedServerEnvelope { + pub(super) event: ServerEvent, + pub(super) client_id: ClientId, + pub(super) stream_id: StreamId, + pub(super) write_complete_tx: Option>, +} + +#[derive(Clone)] +pub(crate) struct RemoteControlHandle { + enabled_tx: Arc>, +} + +impl RemoteControlHandle { + pub(crate) fn set_enabled(&self, enabled: bool) { + self.enabled_tx.send_if_modified(|state| { + let changed = *state != enabled; + *state = enabled; + changed + }); + } +} + +pub(crate) async fn start_remote_control( + remote_control_url: String, + state_db: Option>, + auth_manager: Arc, + 
transport_event_tx: mpsc::Sender, + shutdown_token: CancellationToken, + app_server_client_name_rx: Option>, + initial_enabled: bool, +) -> io::Result<(JoinHandle<()>, RemoteControlHandle)> { + let remote_control_target = if initial_enabled { + Some(normalize_remote_control_url(&remote_control_url)?) + } else { + None + }; + if initial_enabled { + validate_remote_control_auth(&auth_manager).await?; + } + + let (enabled_tx, enabled_rx) = watch::channel(initial_enabled); + let join_handle = tokio::spawn(async move { + RemoteControlWebsocket::new( + remote_control_url, + remote_control_target, + state_db, + auth_manager, + transport_event_tx, + shutdown_token, + enabled_rx, + ) + .run(app_server_client_name_rx) + .await; + }); + + Ok(( + join_handle, + RemoteControlHandle { + enabled_tx: Arc::new(enabled_tx), + }, + )) +} + +pub(crate) async fn validate_remote_control_auth( + auth_manager: &Arc, +) -> io::Result<()> { + match load_remote_control_auth(auth_manager).await { + Ok(_) => Ok(()), + Err(err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()), + Err(err) => Err(err), + } +} + +#[cfg(test)] +mod tests; diff --git a/codex-rs/app-server/src/transport/remote_control/protocol.rs b/codex-rs/app-server/src/transport/remote_control/protocol.rs new file mode 100644 index 0000000000..857855f2a0 --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/protocol.rs @@ -0,0 +1,252 @@ +use crate::outgoing_message::OutgoingMessage; +use codex_app_server_protocol::JSONRPCMessage; +use serde::Deserialize; +use serde::Serialize; +use std::io; +use std::io::ErrorKind; +use url::Host; +use url::Url; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct RemoteControlTarget { + pub(super) websocket_url: String, + pub(super) enroll_url: String, +} + +#[derive(Debug, Serialize)] +pub(super) struct EnrollRemoteServerRequest { + pub(super) name: String, + pub(super) os: &'static str, + pub(super) arch: &'static str, + pub(super) app_server_version: &'static str, +} 
+ +#[derive(Debug, Deserialize)] +pub(super) struct EnrollRemoteServerResponse { + pub(super) server_id: String, + pub(super) environment_id: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ClientId(pub String); + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +pub struct StreamId(pub String); + +impl StreamId { + pub fn new_random() -> Self { + Self(uuid::Uuid::now_v7().to_string()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ClientEvent { + ClientMessage { + message: JSONRPCMessage, + }, + /// Backend-generated acknowledgement for all server envelopes addressed to + /// `client_id` whose envelope `seq_id` is less than or equal to this ack's + /// `seq_id`. This cursor is client-scoped, not stream-scoped, so receivers + /// must not use `stream_id` to partition acks. + Ack, + Ping, + ClientClosed, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub(crate) struct ClientEnvelope { + #[serde(flatten)] + pub(crate) event: ClientEvent, + #[serde(rename = "client_id")] + pub(crate) client_id: ClientId, + #[serde(rename = "stream_id", skip_serializing_if = "Option::is_none")] + pub(crate) stream_id: Option, + /// For `Ack`, this is the backend-generated per-client cursor over + /// `ServerEnvelope.seq_id`. 
+ #[serde(rename = "seq_id", skip_serializing_if = "Option::is_none")] + pub(crate) seq_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub(crate) cursor: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum PongStatus { + Active, + Unknown, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ServerEvent { + ServerMessage { + message: Box, + }, + #[allow(dead_code)] + Ack, + Pong { + status: PongStatus, + }, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "snake_case")] +pub(crate) struct ServerEnvelope { + #[serde(flatten)] + pub(crate) event: ServerEvent, + #[serde(rename = "client_id")] + pub(crate) client_id: ClientId, + #[serde(rename = "stream_id")] + pub(crate) stream_id: StreamId, + #[serde(rename = "seq_id")] + pub(crate) seq_id: u64, +} + +fn is_allowed_chatgpt_host(host: &Option>) -> bool { + let Some(Host::Domain(host)) = *host else { + return false; + }; + host == "chatgpt.com" + || host == "chatgpt-staging.com" + || host.ends_with(".chatgpt.com") + || host.ends_with(".chatgpt-staging.com") +} + +fn is_localhost(host: &Option>) -> bool { + match host { + Some(Host::Domain("localhost")) => true, + Some(Host::Ipv4(ip)) => ip.is_loopback(), + Some(Host::Ipv6(ip)) => ip.is_loopback(), + _ => false, + } +} + +pub(super) fn normalize_remote_control_url( + remote_control_url: &str, +) -> io::Result { + let map_url_parse_error = |err: url::ParseError| -> io::Error { + io::Error::new( + ErrorKind::InvalidInput, + format!("invalid remote control URL `{remote_control_url}`: {err}"), + ) + }; + let map_scheme_error = |_: ()| -> io::Error { + io::Error::new( + ErrorKind::InvalidInput, + format!( + "invalid remote control URL `{remote_control_url}`; expected HTTPS URL for chatgpt.com or chatgpt-staging.com, or HTTP/HTTPS URL for localhost" + ), + ) + }; + + let mut remote_control_url = 
Url::parse(remote_control_url).map_err(map_url_parse_error)?; + if !remote_control_url.path().ends_with('/') { + let normalized_path = format!("{}/", remote_control_url.path()); + remote_control_url.set_path(&normalized_path); + } + + let enroll_url = remote_control_url + .join("wham/remote/control/server/enroll") + .map_err(map_url_parse_error)?; + let mut websocket_url = remote_control_url + .join("wham/remote/control/server") + .map_err(map_url_parse_error)?; + let host = enroll_url.host(); + match enroll_url.scheme() { + "https" if is_localhost(&host) || is_allowed_chatgpt_host(&host) => { + websocket_url.set_scheme("wss").map_err(map_scheme_error)?; + } + "http" if is_localhost(&host) => { + websocket_url.set_scheme("ws").map_err(map_scheme_error)?; + } + _ => return Err(map_scheme_error(())), + } + + Ok(RemoteControlTarget { + websocket_url: websocket_url.to_string(), + enroll_url: enroll_url.to_string(), + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn normalize_remote_control_url_accepts_chatgpt_https_urls() { + assert_eq!( + normalize_remote_control_url("https://chatgpt.com/backend-api") + .expect("chatgpt.com URL should normalize"), + RemoteControlTarget { + websocket_url: "wss://chatgpt.com/backend-api/wham/remote/control/server" + .to_string(), + enroll_url: "https://chatgpt.com/backend-api/wham/remote/control/server/enroll" + .to_string(), + } + ); + assert_eq!( + normalize_remote_control_url("https://api.chatgpt-staging.com/backend-api") + .expect("chatgpt-staging.com subdomain URL should normalize"), + RemoteControlTarget { + websocket_url: + "wss://api.chatgpt-staging.com/backend-api/wham/remote/control/server" + .to_string(), + enroll_url: + "https://api.chatgpt-staging.com/backend-api/wham/remote/control/server/enroll" + .to_string(), + } + ); + } + + #[test] + fn normalize_remote_control_url_accepts_localhost_urls() { + assert_eq!( + 
normalize_remote_control_url("http://localhost:8080/backend-api") + .expect("localhost http URL should normalize"), + RemoteControlTarget { + websocket_url: "ws://localhost:8080/backend-api/wham/remote/control/server" + .to_string(), + enroll_url: "http://localhost:8080/backend-api/wham/remote/control/server/enroll" + .to_string(), + } + ); + assert_eq!( + normalize_remote_control_url("https://localhost:8443/backend-api") + .expect("localhost https URL should normalize"), + RemoteControlTarget { + websocket_url: "wss://localhost:8443/backend-api/wham/remote/control/server" + .to_string(), + enroll_url: "https://localhost:8443/backend-api/wham/remote/control/server/enroll" + .to_string(), + } + ); + } + + #[test] + fn normalize_remote_control_url_rejects_unsupported_urls() { + for remote_control_url in [ + "http://chatgpt.com/backend-api", + "http://example.com/backend-api", + "https://example.com/backend-api", + "https://chatgpt.com.evil.com/backend-api", + "https://evilchatgpt.com/backend-api", + "https://foo.localhost/backend-api", + ] { + let err = normalize_remote_control_url(remote_control_url) + .expect_err("unsupported URL should be rejected"); + + assert_eq!(err.kind(), ErrorKind::InvalidInput); + assert_eq!( + err.to_string(), + format!( + "invalid remote control URL `{remote_control_url}`; expected HTTPS URL for chatgpt.com or chatgpt-staging.com, or HTTP/HTTPS URL for localhost" + ) + ); + } + } +} diff --git a/codex-rs/app-server/src/transport/remote_control/tests.rs b/codex-rs/app-server/src/transport/remote_control/tests.rs new file mode 100644 index 0000000000..9d430ccfb3 --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/tests.rs @@ -0,0 +1,1395 @@ +use super::enroll::REMOTE_CONTROL_ACCOUNT_ID_HEADER; +use super::enroll::RemoteControlEnrollment; +use super::enroll::load_persisted_remote_control_enrollment; +use super::enroll::update_persisted_remote_control_enrollment; +use super::protocol::ClientEnvelope; +use 
super::protocol::ClientEvent; +use super::protocol::ClientId; +use super::protocol::normalize_remote_control_url; +use super::websocket::REMOTE_CONTROL_PROTOCOL_VERSION; +use super::*; +use crate::outgoing_message::OutgoingMessage; +use crate::outgoing_message::QueuedOutgoingMessage; +use crate::transport::CHANNEL_CAPACITY; +use crate::transport::TransportEvent; +use base64::Engine; +use codex_app_server_protocol::AuthMode; +use codex_app_server_protocol::ConfigWarningNotification; +use codex_app_server_protocol::JSONRPCMessage; +use codex_app_server_protocol::ServerNotification; +use codex_config::types::AuthCredentialsStoreMode; +use codex_core::test_support::auth_manager_from_auth; +use codex_core::test_support::auth_manager_from_auth_with_home; +use codex_login::AuthDotJson; +use codex_login::AuthManager; +use codex_login::CodexAuth; +use codex_login::save_auth; +use codex_login::token_data::TokenData; +use codex_login::token_data::parse_chatgpt_jwt_claims; +use codex_state::StateRuntime; +use futures::SinkExt; +use futures::StreamExt; +use gethostname::gethostname; +use pretty_assertions::assert_eq; +use serde_json::json; +use std::collections::BTreeMap; +use std::sync::Arc; +use tempfile::TempDir; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; +use tokio::net::TcpListener; +use tokio::net::TcpStream; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tokio::time::Duration; +use tokio::time::timeout; +use tokio_tungstenite::WebSocketStream; +use tokio_tungstenite::accept_async; +use tokio_tungstenite::accept_hdr_async; +use tokio_tungstenite::tungstenite; +use tokio_util::sync::CancellationToken; + +fn remote_control_auth_manager() -> Arc { + auth_manager_from_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) +} + +fn remote_control_auth_manager_with_home(codex_home: &TempDir) -> Arc { + auth_manager_from_auth_with_home( + CodexAuth::create_dummy_chatgpt_auth_for_testing(), + 
codex_home.path().to_path_buf(), + ) +} + +fn remote_control_auth_dot_json(account_id: Option<&str>) -> AuthDotJson { + #[derive(serde::Serialize)] + struct Header { + alg: &'static str, + typ: &'static str, + } + + let header = Header { + alg: "none", + typ: "JWT", + }; + let payload = serde_json::json!({ + "email": "user@example.com", + "https://api.openai.com/auth": { + "chatgpt_user_id": "user-12345", + "user_id": "user-12345", + "chatgpt_account_id": "account_id" + } + }); + let b64 = |bytes: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes); + let header_b64 = b64(&serde_json::to_vec(&header).expect("header should serialize")); + let payload_b64 = b64(&serde_json::to_vec(&payload).expect("payload should serialize")); + let fake_jwt = format!("{header_b64}.{payload_b64}.sig"); + + AuthDotJson { + auth_mode: Some(AuthMode::Chatgpt), + openai_api_key: None, + tokens: Some(TokenData { + id_token: parse_chatgpt_jwt_claims(&fake_jwt).expect("fake jwt should parse"), + access_token: "Access Token".to_string(), + refresh_token: "refresh-token".to_string(), + account_id: account_id.map(str::to_string), + }), + last_refresh: Some(chrono::Utc::now()), + } +} + +async fn remote_control_state_runtime(codex_home: &TempDir) -> Arc { + StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string()) + .await + .expect("state runtime should initialize") +} + +fn remote_control_url_for_listener(listener: &TcpListener) -> String { + format!( + "http://localhost:{}/backend-api/", + listener + .local_addr() + .expect("listener should have a local addr") + .port() + ) +} + +#[tokio::test] +async fn remote_control_transport_manages_virtual_clients_and_routes_messages() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let (transport_event_tx, mut 
transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(remote_control_state_runtime(&codex_home).await), + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + enroll_request.request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_json( + enroll_request.stream, + json!({ "server_id": "srv_e_test", "environment_id": "env_test" }), + ) + .await; + let mut websocket = accept_remote_control_connection(&listener).await; + + let client_id = ClientId("client-1".to_string()); + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::Ping, + client_id: client_id.clone(), + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "pong", + "client_id": "client-1", + "seq_id": 0, + "status": "unknown", + }) + ); + + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: JSONRPCMessage::Notification( + codex_app_server_protocol::JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }, + ), + }, + client_id: client_id.clone(), + stream_id: None, + seq_id: Some(0), + cursor: None, + }, + ) + .await; + assert!( + timeout(Duration::from_millis(100), transport_event_rx.recv()) + .await + .is_err(), + "non-initialize client messages should be ignored before connection creation" + ); + + let initialize_message = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { + id: codex_app_server_protocol::RequestId::Integer(1), + method: "initialize".to_string(), + params: Some(json!({ + 
"clientInfo": { + "name": "remote-test-client", + "version": "0.1.0" + } + })), + trace: None, + }); + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: initialize_message.clone(), + }, + client_id: client_id.clone(), + stream_id: None, + seq_id: Some(1), + cursor: None, + }, + ) + .await; + + let (connection_id, writer) = match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("connection open should arrive in time") + .expect("connection open should exist") + { + TransportEvent::ConnectionOpened { + connection_id, + writer, + .. + } => (connection_id, writer), + other => panic!("expected connection open event, got {other:?}"), + }; + + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("initialize message should arrive in time") + .expect("initialize message should exist") + { + TransportEvent::IncomingMessage { + connection_id: incoming_connection_id, + message, + } => { + assert_eq!(incoming_connection_id, connection_id); + assert_eq!(message, initialize_message); + } + other => panic!("expected initialize incoming message, got {other:?}"), + } + + let followup_message = + JSONRPCMessage::Notification(codex_app_server_protocol::JSONRPCNotification { + method: "initialized".to_string(), + params: None, + }); + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: followup_message.clone(), + }, + client_id: client_id.clone(), + stream_id: None, + seq_id: Some(2), + cursor: None, + }, + ) + .await; + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("followup message should arrive in time") + .expect("followup message should exist") + { + TransportEvent::IncomingMessage { + connection_id: incoming_connection_id, + message, + } => { + assert_eq!(incoming_connection_id, connection_id); + assert_eq!(message, followup_message); + } + other => panic!("expected followup 
incoming message, got {other:?}"), + } + + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::Ping, + client_id: client_id.clone(), + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "pong", + "client_id": "client-1", + "seq_id": 1, + "status": "active", + }) + ); + + writer + .send(QueuedOutgoingMessage::new( + OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning( + ConfigWarningNotification { + summary: "test".to_string(), + details: None, + path: None, + range: None, + }, + )), + )) + .await + .expect("remote writer should accept outgoing message"); + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "server_message", + "client_id": "client-1", + "seq_id": 2, + "message": { + "method": "configWarning", + "params": { + "summary": "test", + "details": null, + } + } + }) + ); + + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::ClientClosed, + client_id: client_id.clone(), + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("connection close should arrive in time") + .expect("connection close should exist") + { + TransportEvent::ConnectionClosed { + connection_id: closed_connection_id, + } => { + assert_eq!(closed_connection_id, connection_id); + } + other => panic!("expected connection close event, got {other:?}"), + } + + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::Ping, + client_id, + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "pong", + "client_id": "client-1", + "seq_id": 3, + "status": "unknown", + }) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn 
remote_control_transport_reconnects_after_disconnect() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let (transport_event_tx, mut transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(remote_control_state_runtime(&codex_home).await), + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + enroll_request.request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_json( + enroll_request.stream, + json!({ "server_id": "srv_e_test", "environment_id": "env_test" }), + ) + .await; + let mut first_websocket = accept_remote_control_connection(&listener).await; + first_websocket + .close(None) + .await + .expect("first websocket should close"); + drop(first_websocket); + + let mut second_websocket = accept_remote_control_connection(&listener).await; + send_client_event( + &mut second_websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { + id: codex_app_server_protocol::RequestId::Integer(2), + method: "initialize".to_string(), + params: Some(json!({ + "clientInfo": { + "name": "remote-test-client", + "version": "0.1.0" + } + })), + trace: None, + }), + }, + client_id: ClientId("client-2".to_string()), + stream_id: None, + seq_id: Some(0), + cursor: None, + }, + ) + .await; + + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("reconnected initialize should arrive 
in time") + .expect("reconnected initialize should exist") + { + TransportEvent::ConnectionOpened { .. } => {} + other => panic!("expected connection open after reconnect, got {other:?}"), + } + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn remote_control_start_allows_remote_control_invalid_url_when_disabled() { + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + "https://internal.example.com/backend-api/".to_string(), + /*state_db*/ None, + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ false, + ) + .await + .expect("disabled remote control should not validate the URL at startup"); + + shutdown_token.cancel(); + timeout(Duration::from_secs(1), remote_task) + .await + .expect("remote control task should stop") + .expect("remote control task should join"); +} + +#[tokio::test] +async fn remote_control_handle_set_enabled_stops_and_restarts_connections() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, remote_handle) = start_remote_control( + remote_control_url, + Some(remote_control_state_runtime(&codex_home).await), + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + enroll_request.request_line, + "POST 
/backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_json( + enroll_request.stream, + json!({ "server_id": "srv_e_test", "environment_id": "env_test" }), + ) + .await; + let mut first_websocket = accept_remote_control_connection(&listener).await; + + remote_handle.set_enabled(/*enabled*/ false); + timeout(Duration::from_secs(1), first_websocket.next()) + .await + .expect("disabling remote control should close the websocket"); + timeout(Duration::from_millis(100), listener.accept()) + .await + .expect_err("disabled remote control should not reconnect"); + + remote_handle.set_enabled(/*enabled*/ true); + let mut second_websocket = accept_remote_control_connection(&listener).await; + second_websocket + .close(None) + .await + .expect("second websocket should close"); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn remote_control_transport_clears_outgoing_buffer_when_backend_acks() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let (transport_event_tx, mut transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(remote_control_state_runtime(&codex_home).await), + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let enroll_request = accept_http_request(&listener).await; + respond_with_json( + enroll_request.stream, + json!({ "server_id": "srv_e_test", "environment_id": "env_test" }), + ) + .await; + let mut first_websocket = accept_remote_control_connection(&listener).await; + + let client_id = ClientId("client-1".to_string()); + 
let initialize_message = JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { + id: codex_app_server_protocol::RequestId::Integer(1), + method: "initialize".to_string(), + params: Some(json!({ + "clientInfo": { + "name": "remote-test-client", + "version": "0.1.0" + } + })), + trace: None, + }); + send_client_event( + &mut first_websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: initialize_message, + }, + client_id: client_id.clone(), + stream_id: None, + seq_id: Some(0), + cursor: None, + }, + ) + .await; + + let writer = match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("connection open should arrive in time") + .expect("connection open should exist") + { + TransportEvent::ConnectionOpened { writer, .. } => writer, + other => panic!("expected connection open event, got {other:?}"), + }; + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("initialize message should arrive in time") + .expect("initialize message should exist") + { + TransportEvent::IncomingMessage { .. 
} => {} + other => panic!("expected initialize incoming message, got {other:?}"), + } + + writer + .send(QueuedOutgoingMessage::new( + OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning( + ConfigWarningNotification { + summary: "stale".to_string(), + details: None, + path: None, + range: None, + }, + )), + )) + .await + .expect("remote writer should accept outgoing message"); + assert_eq!( + read_server_event(&mut first_websocket).await, + json!({ + "type": "server_message", + "client_id": "client-1", + "seq_id": 0, + "message": { + "method": "configWarning", + "params": { + "summary": "stale", + "details": null, + } + } + }) + ); + + send_client_event( + &mut first_websocket, + ClientEnvelope { + event: ClientEvent::Ack, + client_id: client_id.clone(), + stream_id: None, + seq_id: Some(0), + cursor: None, + }, + ) + .await; + + send_client_event( + &mut first_websocket, + ClientEnvelope { + event: ClientEvent::ClientClosed, + client_id: client_id.clone(), + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("connection close should arrive in time") + .expect("connection close should exist") + { + TransportEvent::ConnectionClosed { .. 
} => {} + other => panic!("expected connection close event, got {other:?}"), + } + + first_websocket + .close(None) + .await + .expect("first websocket should close"); + drop(first_websocket); + + let mut second_websocket = accept_remote_control_connection(&listener).await; + send_client_event( + &mut second_websocket, + ClientEnvelope { + event: ClientEvent::Ping, + client_id, + stream_id: None, + seq_id: None, + cursor: None, + }, + ) + .await; + assert_eq!( + read_server_event(&mut second_websocket).await, + json!({ + "type": "pong", + "client_id": "client-1", + "seq_id": 1, + "status": "unknown", + }) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn remote_control_http_mode_enrolls_before_connecting() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let (transport_event_tx, mut transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let expected_server_name = gethostname().to_string_lossy().trim().to_string(); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(remote_control_state_runtime(&codex_home).await), + remote_control_auth_manager(), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + enroll_request.request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + assert_eq!( + enroll_request.headers.get("authorization"), + Some(&"Bearer Access Token".to_string()) + ); + assert_eq!( + enroll_request.headers.get(REMOTE_CONTROL_ACCOUNT_ID_HEADER), + Some(&"account_id".to_string()) + ); + assert_eq!( + 
serde_json::from_str::(&enroll_request.body) + .expect("enroll body should deserialize"), + json!({ + "name": expected_server_name, + "os": std::env::consts::OS, + "arch": std::env::consts::ARCH, + "app_server_version": env!("CARGO_PKG_VERSION"), + }) + ); + respond_with_json( + enroll_request.stream, + json!({ "server_id": "srv_e_test", "environment_id": "env_test" }), + ) + .await; + + let (handshake_request, mut websocket) = + accept_remote_control_backend_connection(&listener).await; + assert_eq!( + handshake_request.path, + "/backend-api/wham/remote/control/server" + ); + assert_eq!( + handshake_request.headers.get("authorization"), + Some(&"Bearer Access Token".to_string()) + ); + assert_eq!( + handshake_request + .headers + .get(REMOTE_CONTROL_ACCOUNT_ID_HEADER), + Some(&"account_id".to_string()) + ); + assert_eq!( + handshake_request.headers.get("x-codex-server-id"), + Some(&"srv_e_test".to_string()) + ); + assert_eq!( + handshake_request.headers.get("x-codex-name"), + Some(&base64::engine::general_purpose::STANDARD.encode(&expected_server_name)) + ); + assert_eq!( + handshake_request.headers.get("x-codex-protocol-version"), + Some(&REMOTE_CONTROL_PROTOCOL_VERSION.to_string()) + ); + + let backend_client_id = ClientId("backend-test-client".to_string()); + let writer = { + let initialize_message = + JSONRPCMessage::Request(codex_app_server_protocol::JSONRPCRequest { + id: codex_app_server_protocol::RequestId::Integer(11), + method: "initialize".to_string(), + params: Some(json!({ + "clientInfo": { + "name": "remote-backend-client", + "version": "0.1.0" + } + })), + trace: None, + }); + send_client_event( + &mut websocket, + ClientEnvelope { + event: ClientEvent::ClientMessage { + message: initialize_message.clone(), + }, + client_id: backend_client_id.clone(), + stream_id: None, + seq_id: Some(0), + cursor: None, + }, + ) + .await; + + let (connection_id, writer) = + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + 
.expect("connection open should arrive in time") + .expect("connection open should exist") + { + TransportEvent::ConnectionOpened { + connection_id, + writer, + .. + } => (connection_id, writer), + other => panic!("expected connection open event, got {other:?}"), + }; + + match timeout(Duration::from_secs(5), transport_event_rx.recv()) + .await + .expect("initialize message should arrive in time") + .expect("initialize message should exist") + { + TransportEvent::IncomingMessage { + connection_id: incoming_connection_id, + message, + } => { + assert_eq!(incoming_connection_id, connection_id); + assert_eq!(message, initialize_message); + } + other => panic!("expected initialize incoming message, got {other:?}"), + } + writer + }; + + writer + .send(QueuedOutgoingMessage::new(OutgoingMessage::Response( + crate::outgoing_message::OutgoingResponse { + id: codex_app_server_protocol::RequestId::Integer(11), + result: json!({ + "userAgent": "codex-test-agent" + }), + }, + ))) + .await + .expect("remote writer should accept initialize response"); + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "server_message", + "client_id": backend_client_id.0.clone(), + "seq_id": 0, + "message": { + "id": 11, + "result": { + "userAgent": "codex-test-agent", + } + } + }) + ); + + writer + .send(QueuedOutgoingMessage::new( + OutgoingMessage::AppServerNotification(ServerNotification::ConfigWarning( + ConfigWarningNotification { + summary: "backend".to_string(), + details: None, + path: None, + range: None, + }, + )), + )) + .await + .expect("remote writer should accept outgoing message"); + assert_eq!( + read_server_event(&mut websocket).await, + json!({ + "type": "server_message", + "client_id": backend_client_id.0.clone(), + "seq_id": 1, + "message": { + "method": "configWarning", + "params": { + "summary": "backend", + "details": null, + } + } + }) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn 
remote_control_http_mode_reuses_persisted_enrollment_before_reenrolling() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let state_db = remote_control_state_runtime(&codex_home).await; + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let persisted_enrollment = RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_persisted".to_string(), + server_id: "srv_e_persisted".to_string(), + server_name: "persisted-server".to_string(), + }; + update_persisted_remote_control_enrollment( + Some(state_db.as_ref()), + &remote_control_target, + "account_id", + /*app_server_client_name*/ None, + Some(&persisted_enrollment), + ) + .await + .expect("persisted enrollment should save"); + + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(state_db.clone()), + remote_control_auth_manager_with_home(&codex_home), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let (handshake_request, _websocket) = accept_remote_control_backend_connection(&listener).await; + assert_eq!( + handshake_request.path, + "/backend-api/wham/remote/control/server" + ); + assert_eq!( + handshake_request.headers.get("x-codex-server-id"), + Some(&persisted_enrollment.server_id) + ); + assert_eq!( + load_persisted_remote_control_enrollment( + Some(state_db.as_ref()), + &remote_control_target, + "account_id", + /*app_server_client_name*/ None, + ) + .await, + Some(persisted_enrollment) + ); + + shutdown_token.cancel(); + let _ = 
remote_task.await; +} + +#[tokio::test] +async fn remote_control_stdio_mode_waits_for_client_name_before_connecting() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let state_db = remote_control_state_runtime(&codex_home).await; + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let app_server_client_name = "stdio-client"; + let persisted_enrollment = RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_persisted".to_string(), + server_id: "srv_e_persisted".to_string(), + server_name: "persisted-server".to_string(), + }; + update_persisted_remote_control_enrollment( + Some(state_db.as_ref()), + &remote_control_target, + "account_id", + Some(app_server_client_name), + Some(&persisted_enrollment), + ) + .await + .expect("persisted enrollment should save"); + + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let (app_server_client_name_tx, app_server_client_name_rx) = oneshot::channel::(); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(state_db.clone()), + remote_control_auth_manager_with_home(&codex_home), + transport_event_tx, + shutdown_token.clone(), + Some(app_server_client_name_rx), + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + timeout(Duration::from_millis(100), listener.accept()) + .await + .expect_err("remote control should wait for the stdio client name"); + + let _ = app_server_client_name_tx.send(app_server_client_name.to_string()); + let (handshake_request, _websocket) = accept_remote_control_backend_connection(&listener).await; + assert_eq!( + handshake_request.headers.get("x-codex-server-id"), + 
Some(&persisted_enrollment.server_id) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn remote_control_waits_for_account_id_before_enrolling() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + save_auth( + codex_home.path(), + &remote_control_auth_dot_json(/*account_id*/ None), + AuthCredentialsStoreMode::File, + ) + .expect("auth without account id should save"); + let state_db = remote_control_state_runtime(&codex_home).await; + let auth_manager = AuthManager::shared( + codex_home.path().to_path_buf(), + /*enable_codex_api_key_env*/ false, + AuthCredentialsStoreMode::File, + ); + let expected_server_name = gethostname().to_string_lossy().trim().to_string(); + let expected_enrollment = RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_ready".to_string(), + server_id: "srv_e_ready".to_string(), + server_name: expected_server_name, + }; + + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, _remote_handle) = start_remote_control( + remote_control_url, + Some(state_db.clone()), + auth_manager, + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start before account id is available"); + + timeout(Duration::from_millis(100), listener.accept()) + .await + .expect_err("remote control should wait for account id before enrolling"); + + save_auth( + codex_home.path(), + &remote_control_auth_dot_json(Some("account_id")), + AuthCredentialsStoreMode::File, + ) + .expect("auth with account id should save"); + + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + 
enroll_request.request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_json( + enroll_request.stream, + json!({ + "server_id": expected_enrollment.server_id, + "environment_id": expected_enrollment.environment_id, + }), + ) + .await; + + let (handshake_request, _websocket) = accept_remote_control_backend_connection(&listener).await; + assert_eq!( + handshake_request.headers.get("x-codex-server-id"), + Some(&expected_enrollment.server_id) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[tokio::test] +async fn remote_control_http_mode_clears_stale_persisted_enrollment_after_404() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let codex_home = TempDir::new().expect("temp dir should create"); + let state_db = remote_control_state_runtime(&codex_home).await; + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let expected_server_name = gethostname().to_string_lossy().trim().to_string(); + let stale_enrollment = RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_stale".to_string(), + server_id: "srv_e_stale".to_string(), + server_name: "stale-server".to_string(), + }; + let refreshed_enrollment = RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_refreshed".to_string(), + server_id: "srv_e_refreshed".to_string(), + server_name: expected_server_name, + }; + update_persisted_remote_control_enrollment( + Some(state_db.as_ref()), + &remote_control_target, + "account_id", + /*app_server_client_name*/ None, + Some(&stale_enrollment), + ) + .await + .expect("stale enrollment should save"); + + let (transport_event_tx, _transport_event_rx) = + mpsc::channel::(CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let (remote_task, 
_remote_handle) = start_remote_control( + remote_control_url, + Some(state_db.clone()), + remote_control_auth_manager_with_home(&codex_home), + transport_event_tx, + shutdown_token.clone(), + /*app_server_client_name_rx*/ None, + /*initial_enabled*/ true, + ) + .await + .expect("remote control should start"); + + let websocket_request = accept_http_request(&listener).await; + assert_eq!( + websocket_request.request_line, + "GET /backend-api/wham/remote/control/server HTTP/1.1" + ); + assert_eq!( + websocket_request.headers.get("x-codex-server-id"), + Some(&stale_enrollment.server_id) + ); + respond_with_status(websocket_request.stream, "404 Not Found", "").await; + + let enroll_request = accept_http_request(&listener).await; + assert_eq!( + enroll_request.request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_json( + enroll_request.stream, + json!({ + "server_id": refreshed_enrollment.server_id, + "environment_id": refreshed_enrollment.environment_id, + }), + ) + .await; + + let (handshake_request, _websocket) = accept_remote_control_backend_connection(&listener).await; + assert_eq!( + handshake_request.headers.get("x-codex-server-id"), + Some(&refreshed_enrollment.server_id) + ); + assert_eq!( + load_persisted_remote_control_enrollment( + Some(state_db.as_ref()), + &remote_control_target, + "account_id", + /*app_server_client_name*/ None, + ) + .await, + Some(refreshed_enrollment) + ); + + shutdown_token.cancel(); + let _ = remote_task.await; +} + +#[derive(Debug)] +struct CapturedHttpRequest { + stream: TcpStream, + request_line: String, + headers: BTreeMap, + body: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +struct CapturedWebSocketRequest { + path: String, + headers: BTreeMap, +} + +async fn accept_remote_control_connection(listener: &TcpListener) -> WebSocketStream { + let (stream, _) = timeout(Duration::from_secs(5), listener.accept()) + .await + .expect("remote control should connect in time") + 
.expect("listener accept should succeed"); + accept_async(stream) + .await + .expect("websocket handshake should succeed") +} + +async fn accept_http_request(listener: &TcpListener) -> CapturedHttpRequest { + let (stream, _) = timeout(Duration::from_secs(5), listener.accept()) + .await + .expect("HTTP request should arrive in time") + .expect("listener accept should succeed"); + let mut reader = BufReader::new(stream); + + let mut request_line = String::new(); + reader + .read_line(&mut request_line) + .await + .expect("request line should read"); + let request_line = request_line.trim_end_matches("\r\n").to_string(); + + let mut headers = BTreeMap::new(); + loop { + let mut line = String::new(); + reader + .read_line(&mut line) + .await + .expect("header line should read"); + if line == "\r\n" { + break; + } + let line = line.trim_end_matches("\r\n"); + let (name, value) = line.split_once(':').expect("header should contain colon"); + headers.insert(name.to_ascii_lowercase(), value.trim().to_string()); + } + + let content_length = headers + .get("content-length") + .and_then(|value| value.parse::().ok()) + .unwrap_or(0); + let mut body = vec![0; content_length]; + reader + .read_exact(&mut body) + .await + .expect("request body should read"); + + CapturedHttpRequest { + stream: reader.into_inner(), + request_line, + headers, + body: String::from_utf8(body).expect("body should be utf-8"), + } +} + +async fn respond_with_json(mut stream: TcpStream, body: serde_json::Value) { + let body = body.to_string(); + let response = format!( + "HTTP/1.1 200 OK\r\ncontent-type: application/json\r\ncontent-length: {}\r\nconnection: close\r\n\r\n{body}", + body.len() + ); + stream + .write_all(response.as_bytes()) + .await + .expect("response should write"); + stream.flush().await.expect("response should flush"); +} + +async fn respond_with_status(stream: TcpStream, status: &str, body: &str) { + respond_with_status_and_headers(stream, status, &[], body).await; +} + +async fn 
respond_with_status_and_headers( + mut stream: TcpStream, + status: &str, + headers: &[(&str, &str)], + body: &str, +) { + let extra_headers = headers + .iter() + .map(|(name, value)| format!("{name}: {value}\r\n")) + .collect::(); + let response = format!( + "HTTP/1.1 {status}\r\ncontent-type: text/plain\r\ncontent-length: {}\r\nconnection: close\r\n{extra_headers}\r\n{body}", + body.len(), + ); + stream + .write_all(response.as_bytes()) + .await + .expect("response should write"); + stream.flush().await.expect("response should flush"); +} + +async fn accept_remote_control_backend_connection( + listener: &TcpListener, +) -> (CapturedWebSocketRequest, WebSocketStream) { + let (stream, _) = timeout(Duration::from_secs(5), listener.accept()) + .await + .expect("websocket request should arrive in time") + .expect("listener accept should succeed"); + let captured_request = Arc::new(std::sync::Mutex::new(None::)); + let captured_request_for_callback = captured_request.clone(); + let websocket = accept_hdr_async( + stream, + move |request: &tungstenite::handshake::server::Request, + response: tungstenite::handshake::server::Response| { + let headers = request + .headers() + .iter() + .map(|(name, value)| { + ( + name.as_str().to_ascii_lowercase(), + value + .to_str() + .expect("header should be valid utf-8") + .to_string(), + ) + }) + .collect::>(); + *captured_request_for_callback + .lock() + .expect("capture lock should acquire") = Some(CapturedWebSocketRequest { + path: request.uri().path().to_string(), + headers, + }); + Ok(response) + }, + ) + .await + .expect("websocket handshake should succeed"); + let captured_request = captured_request + .lock() + .expect("capture lock should acquire") + .clone() + .expect("websocket request should be captured"); + (captured_request, websocket) +} + +async fn send_client_event( + websocket: &mut WebSocketStream, + client_envelope: ClientEnvelope, +) { + let payload = serde_json::to_string(&client_envelope).expect("client event 
should serialize"); + websocket + .send(tungstenite::Message::Text(payload.into())) + .await + .expect("client event should send"); +} + +async fn read_server_event(websocket: &mut WebSocketStream) -> serde_json::Value { + loop { + let frame = timeout(Duration::from_secs(5), websocket.next()) + .await + .expect("server event should arrive in time") + .expect("websocket should stay open") + .expect("websocket frame should be readable"); + match frame { + tungstenite::Message::Text(text) => { + let mut event: serde_json::Value = + serde_json::from_str(text.as_ref()).expect("server event should deserialize"); + if let Some(stream_id) = event + .as_object_mut() + .and_then(|event| event.remove("stream_id")) + { + assert!(stream_id.is_string(), "stream_id should be a string"); + } + return event; + } + tungstenite::Message::Ping(payload) => { + websocket + .send(tungstenite::Message::Pong(payload)) + .await + .expect("websocket pong should send"); + } + tungstenite::Message::Pong(_) => {} + tungstenite::Message::Close(frame) => { + panic!("unexpected websocket close frame: {frame:?}"); + } + tungstenite::Message::Binary(_) => { + panic!("unexpected binary websocket frame"); + } + tungstenite::Message::Frame(_) => {} + } + } +} diff --git a/codex-rs/app-server/src/transport/remote_control/websocket.rs b/codex-rs/app-server/src/transport/remote_control/websocket.rs new file mode 100644 index 0000000000..a0387ef6c3 --- /dev/null +++ b/codex-rs/app-server/src/transport/remote_control/websocket.rs @@ -0,0 +1,1474 @@ +use crate::transport::TransportEvent; +use crate::transport::remote_control::client_tracker::ClientTracker; +use crate::transport::remote_control::client_tracker::REMOTE_CONTROL_IDLE_SWEEP_INTERVAL; +use crate::transport::remote_control::enroll::RemoteControlConnectionAuth; +use crate::transport::remote_control::enroll::RemoteControlEnrollment; +use crate::transport::remote_control::enroll::enroll_remote_control_server; +use 
crate::transport::remote_control::enroll::format_headers; +use crate::transport::remote_control::enroll::load_persisted_remote_control_enrollment; +use crate::transport::remote_control::enroll::preview_remote_control_response_body; +use crate::transport::remote_control::enroll::update_persisted_remote_control_enrollment; + +use super::protocol::ClientEnvelope; +use super::protocol::ClientEvent; +use super::protocol::ClientId; +use super::protocol::RemoteControlTarget; +use super::protocol::ServerEnvelope; +use axum::http::HeaderValue; +use base64::Engine; +use codex_core::util::backoff; +use codex_login::AuthManager; +use codex_login::UnauthorizedRecovery; +use codex_state::StateRuntime; +use codex_utils_rustls_provider::ensure_rustls_crypto_provider; +use futures::SinkExt; +use futures::StreamExt; +use futures::stream::SplitSink; +use futures::stream::SplitStream; +use std::collections::BTreeMap; +use std::collections::HashMap; +use std::io; +use std::io::ErrorKind; +use std::sync::Arc; +use tokio::net::TcpStream; +use tokio::sync::Mutex; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tokio::sync::watch; +use tokio::time::MissedTickBehavior; +use tokio_tungstenite::MaybeTlsStream; +use tokio_tungstenite::WebSocketStream; +use tokio_tungstenite::connect_async; +use tokio_tungstenite::tungstenite; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use tokio_util::sync::CancellationToken; +use tracing::error; +use tracing::info; +use tracing::warn; + +pub(super) const REMOTE_CONTROL_PROTOCOL_VERSION: &str = "2"; +pub(super) const REMOTE_CONTROL_ACCOUNT_ID_HEADER: &str = "chatgpt-account-id"; +const REMOTE_CONTROL_SUBSCRIBE_CURSOR_HEADER: &str = "x-codex-subscribe-cursor"; +const REMOTE_CONTROL_WEBSOCKET_PING_INTERVAL: std::time::Duration = + std::time::Duration::from_secs(10); +const REMOTE_CONTROL_WEBSOCKET_PONG_TIMEOUT: std::time::Duration = + std::time::Duration::from_secs(60); +const REMOTE_CONTROL_ACCOUNT_ID_RETRY_INTERVAL: 
std::time::Duration = + std::time::Duration::from_secs(1); + +struct BoundedOutboundBuffer { + // Remote-control acks are generated by the backend at client scope, so + // retransmit retention is keyed by client_id only. stream_id stays on each + // envelope for routing, but it is not part of the ack cursor. + buffer_by_client: HashMap>, + used_tx: watch::Sender, +} + +impl BoundedOutboundBuffer { + fn new() -> (Self, watch::Receiver) { + let (used_tx, used_rx) = watch::channel(0); + let buffer = Self { + buffer_by_client: HashMap::new(), + used_tx, + }; + (buffer, used_rx) + } + + fn insert(&mut self, server_envelope: &ServerEnvelope) { + self.buffer_by_client + .entry(server_envelope.client_id.clone()) + .or_default() + .insert(server_envelope.seq_id, server_envelope.clone()); + self.used_tx.send_modify(|used| *used += 1); + } + + fn ack(&mut self, client_id: &ClientId, acked_seq_id: u64) { + let Some(buffer) = self.buffer_by_client.get_mut(client_id) else { + return; + }; + while let Some(seq_id) = buffer.first_key_value().map(|(seq_id, _)| seq_id) + && *seq_id <= acked_seq_id + { + buffer.pop_first(); + self.used_tx.send_modify(|used| *used -= 1); + } + if buffer.is_empty() { + self.buffer_by_client.remove(client_id); + } + } + + fn server_envelopes(&self) -> impl Iterator { + self.buffer_by_client + .values() + .flat_map(|buffer| buffer.values()) + } +} + +struct WebsocketState { + outbound_buffer: BoundedOutboundBuffer, + subscribe_cursor: Option, + next_seq_id: u64, +} + +pub(crate) struct RemoteControlWebsocket { + remote_control_url: String, + remote_control_target: Option, + state_db: Option>, + auth_manager: Arc, + shutdown_token: CancellationToken, + reconnect_attempt: u64, + enrollment: Option, + auth_recovery: UnauthorizedRecovery, + client_tracker: Arc>, + state: Arc>, + server_event_rx: Arc>>, + used_rx: watch::Receiver, + enabled_rx: watch::Receiver, +} + +enum ConnectOutcome { + Connected(Box>>), + Disabled, + Shutdown, +} + +impl 
RemoteControlWebsocket { + pub(crate) fn new( + remote_control_url: String, + remote_control_target: Option, + state_db: Option>, + auth_manager: Arc, + transport_event_tx: mpsc::Sender, + shutdown_token: CancellationToken, + enabled_rx: watch::Receiver, + ) -> Self { + let shutdown_token = shutdown_token.child_token(); + let (server_event_tx, server_event_rx) = mpsc::channel(super::CHANNEL_CAPACITY); + let client_tracker = + ClientTracker::new(server_event_tx, transport_event_tx, &shutdown_token); + let (outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let auth_recovery = auth_manager.unauthorized_recovery(); + + Self { + remote_control_url, + remote_control_target, + state_db, + auth_manager, + shutdown_token, + reconnect_attempt: 0, + enrollment: None, + auth_recovery, + client_tracker: Arc::new(Mutex::new(client_tracker)), + state: Arc::new(Mutex::new(WebsocketState { + outbound_buffer, + subscribe_cursor: None, + next_seq_id: 0, + })), + server_event_rx: Arc::new(Mutex::new(server_event_rx)), + used_rx, + enabled_rx, + } + } + + pub(crate) async fn run( + mut self, + app_server_client_name_rx: Option>, + ) { + let app_server_client_name = match self + .wait_for_app_server_client_name(app_server_client_name_rx) + .await + { + Ok(app_server_client_name) => app_server_client_name, + Err(_) => { + self.client_tracker.lock().await.shutdown().await; + return; + } + }; + + loop { + if !self.wait_until_enabled().await { + break; + } + + let shutdown_token = self.shutdown_token.child_token(); + let websocket_connection = match self + .connect(&shutdown_token, app_server_client_name.as_deref()) + .await + { + ConnectOutcome::Connected(websocket_connection) => *websocket_connection, + ConnectOutcome::Disabled => continue, + ConnectOutcome::Shutdown => break, + }; + + self.run_connection(websocket_connection, shutdown_token) + .await; + } + + self.client_tracker.lock().await.shutdown().await; + } + + async fn wait_for_app_server_client_name( + &self, + 
app_server_client_name_rx: Option>, + ) -> Result, ()> { + match app_server_client_name_rx { + Some(app_server_client_name_rx) => { + tokio::select! { + _ = self.shutdown_token.cancelled() => Err(()), + app_server_client_name = app_server_client_name_rx => match app_server_client_name { + Ok(app_server_client_name) => Ok(Some(app_server_client_name)), + Err(_) => Err(()), + }, + } + } + None => Ok(None), + } + } + + async fn wait_until_enabled(&mut self) -> bool { + tokio::select! { + _ = self.shutdown_token.cancelled() => false, + enabled = self.enabled_rx.wait_for(|enabled| *enabled) => enabled.is_ok(), + } + } + + async fn connect( + &mut self, + shutdown_token: &CancellationToken, + app_server_client_name: Option<&str>, + ) -> ConnectOutcome { + let remote_control_target = match self.remote_control_target.as_ref() { + Some(remote_control_target) => remote_control_target.clone(), + None => match super::protocol::normalize_remote_control_url(&self.remote_control_url) { + Ok(remote_control_target) => { + self.remote_control_target = Some(remote_control_target.clone()); + remote_control_target + } + Err(err) => { + warn!("remote control is enabled but the URL is invalid: {err}"); + tokio::select! { + _ = shutdown_token.cancelled() => return ConnectOutcome::Shutdown, + changed = self.enabled_rx.wait_for(|enabled| !*enabled) => { + if changed.is_err() { + return ConnectOutcome::Shutdown; + } + return ConnectOutcome::Disabled; + } + } + } + }, + }; + + loop { + let subscribe_cursor = self.state.lock().await.subscribe_cursor.clone(); + let connect_result = tokio::select! 
{ + _ = shutdown_token.cancelled() => return ConnectOutcome::Shutdown, + changed = self.enabled_rx.wait_for(|enabled| !*enabled) => { + if changed.is_err() { + return ConnectOutcome::Shutdown; + } + return ConnectOutcome::Disabled; + } + connect_result = connect_remote_control_websocket( + &remote_control_target, + self.state_db.as_deref(), + &self.auth_manager, + &mut self.auth_recovery, + &mut self.enrollment, + subscribe_cursor.as_deref(), + app_server_client_name, + ) => connect_result, + }; + + match connect_result { + Ok((websocket_connection, response)) => { + self.reconnect_attempt = 0; + self.auth_recovery = self.auth_manager.unauthorized_recovery(); + info!( + "connected to app-server remote control websocket: {}, {}", + remote_control_target.websocket_url, + format_headers(response.headers()) + ); + return ConnectOutcome::Connected(Box::new(websocket_connection)); + } + Err(err) => { + let reconnect_delay = if err.kind() == ErrorKind::WouldBlock { + REMOTE_CONTROL_ACCOUNT_ID_RETRY_INTERVAL + } else { + warn!( + "failed to connect to app-server remote control websocket: {}, err: {}", + remote_control_target.websocket_url, err + ); + let reconnect_delay = backoff(self.reconnect_attempt); + self.reconnect_attempt += 1; + reconnect_delay + }; + tokio::select! 
{ + _ = shutdown_token.cancelled() => return ConnectOutcome::Shutdown, + changed = self.enabled_rx.wait_for(|enabled| !*enabled) => { + if changed.is_err() { + return ConnectOutcome::Shutdown; + } + return ConnectOutcome::Disabled; + } + _ = tokio::time::sleep(reconnect_delay) => {} + } + } + } + } + } + + async fn run_connection( + &self, + websocket_connection: WebSocketStream>, + shutdown_token: CancellationToken, + ) { + let (websocket_writer, websocket_reader) = websocket_connection.split(); + let mut join_set = tokio::task::JoinSet::new(); + + join_set.spawn(Self::run_server_writer( + self.state.clone(), + self.server_event_rx.clone(), + self.used_rx.clone(), + websocket_writer, + REMOTE_CONTROL_WEBSOCKET_PING_INTERVAL, + shutdown_token.clone(), + )); + join_set.spawn(Self::run_websocket_reader( + self.client_tracker.clone(), + self.state.clone(), + websocket_reader, + REMOTE_CONTROL_WEBSOCKET_PONG_TIMEOUT, + shutdown_token.clone(), + )); + + let mut enabled_rx = self.enabled_rx.clone(); + tokio::select! 
{ + _ = shutdown_token.cancelled() => {} + _ = enabled_rx.wait_for(|enabled| !*enabled) => shutdown_token.cancel(), + _ = join_set.join_next() => shutdown_token.cancel(), + } + + join_set.join_all().await; + } + + async fn run_server_writer( + state: Arc>, + server_event_rx: Arc>>, + used_rx: watch::Receiver, + websocket_writer: SplitSink< + WebSocketStream>, + tungstenite::Message, + >, + ping_interval: std::time::Duration, + shutdown_token: CancellationToken, + ) { + let result = Self::run_server_writer_inner( + state, + server_event_rx, + used_rx, + websocket_writer, + ping_interval, + shutdown_token, + ) + .await; + if let Err(err) = result { + warn!("remote control websocket writer disconnected, err: {err}"); + } else { + warn!("remote control websocket writer was stopped"); + } + } + + async fn run_server_writer_inner( + state: Arc>, + server_event_rx: Arc>>, + mut used_rx: watch::Receiver, + mut websocket_writer: SplitSink< + WebSocketStream>, + tungstenite::Message, + >, + ping_interval: std::time::Duration, + shutdown_token: CancellationToken, + ) -> io::Result<()> { + for server_envelope in state.lock().await.outbound_buffer.server_envelopes() { + let payload = match serde_json::to_string(&server_envelope) { + Ok(payload) => payload, + Err(err) => { + error!("failed to serialize remote-control server event: {err}"); + continue; + } + }; + tokio::select! { + _ = shutdown_token.cancelled() => return Ok(()), + send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => { + if let Err(err) = send_result { + return Err(io::Error::other(err)); + } + } + }; + } + + let mut ping_interval = + tokio::time::interval_at(tokio::time::Instant::now() + ping_interval, ping_interval); + ping_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + let mut server_event_rx = server_event_rx.lock().await; + loop { + let outbound_has_capacity = *used_rx.borrow() < super::CHANNEL_CAPACITY; + let queued_server_envelope = tokio::select! 
{ + _ = shutdown_token.cancelled() => return Ok(()), + _ = ping_interval.tick() => { + if let Err(err) = websocket_writer + .send(tungstenite::Message::Ping(Vec::new().into())) + .await + { + return Err(io::Error::other(err)); + } + continue; + } + wait_result = used_rx.changed(), if !outbound_has_capacity => + { + if wait_result.is_err() { + return Err(io::Error::new( + ErrorKind::UnexpectedEof, + "outbound buffer usage channel closed", + )); + } + continue; + } + recv_result = server_event_rx.recv(), if outbound_has_capacity => { + match recv_result { + Some(queued_server_envelope) => queued_server_envelope, + None => { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "server event channel closed")); + } + } + } + }; + let (server_envelope, write_complete_tx) = { + let mut state = state.lock().await; + let seq_id = state.next_seq_id; + state.next_seq_id = state.next_seq_id.saturating_add(1); + + let server_envelope = ServerEnvelope { + event: queued_server_envelope.event, + client_id: queued_server_envelope.client_id, + seq_id, + stream_id: queued_server_envelope.stream_id, + }; + state.outbound_buffer.insert(&server_envelope); + + (server_envelope, queued_server_envelope.write_complete_tx) + }; + + let payload = match serde_json::to_string(&server_envelope) { + Ok(payload) => payload, + Err(err) => { + error!("failed to serialize remote-control server event: {err}"); + continue; + } + }; + + tokio::select! 
{ + _ = shutdown_token.cancelled() => return Ok(()), + send_result = websocket_writer.send(tungstenite::Message::Text(payload.into())) => { + if let Err(err) = send_result { + return Err(io::Error::other(err)); + } + } + }; + if let Some(write_complete_tx) = write_complete_tx { + let _ = write_complete_tx.send(()); + } + } + } + + async fn run_websocket_reader( + client_tracker: Arc>, + state: Arc>, + websocket_reader: SplitStream>>, + pong_timeout: std::time::Duration, + shutdown_token: CancellationToken, + ) { + let result = Self::run_websocket_reader_inner( + client_tracker, + state, + websocket_reader, + pong_timeout, + shutdown_token, + ) + .await; + if let Err(err) = result { + warn!("remote control websocket reader disconnected, err: {err}"); + } else { + warn!("remote control websocket reader was stopped"); + } + } + + async fn run_websocket_reader_inner( + client_tracker: Arc>, + state: Arc>, + mut websocket_reader: SplitStream>>, + pong_timeout: std::time::Duration, + shutdown_token: CancellationToken, + ) -> io::Result<()> { + let mut client_tracker = client_tracker.lock().await; + let mut idle_sweep_interval = tokio::time::interval(REMOTE_CONTROL_IDLE_SWEEP_INTERVAL); + idle_sweep_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let pong_deadline = tokio::time::sleep(pong_timeout); + tokio::pin!(pong_deadline); + + loop { + let incoming_message = tokio::select! 
{ + _ = shutdown_token.cancelled() => return Ok(()), + _ = &mut pong_deadline => { + return Err(io::Error::new( + ErrorKind::TimedOut, + "remote control websocket pong timeout", + )); + } + client_key = client_tracker.bookkeep_join_set() => { + let Some(client_key) = client_key else { + continue; + }; + if client_tracker.close_client(&client_key).await.is_err() { + return Ok(()); + } + continue; + } + _ = idle_sweep_interval.tick() => { + if client_tracker.close_expired_clients().await.is_err() { + return Ok(()); + } + continue; + } + incoming_message = websocket_reader.next() => { + match incoming_message { + Some(incoming_message) => incoming_message, + None => return Err(io::Error::new(ErrorKind::UnexpectedEof, "websocket stream ended")), + } + } + }; + let client_envelope = match incoming_message { + Ok(tungstenite::Message::Text(text)) => { + match serde_json::from_str::(&text) { + Ok(client_envelope) => client_envelope, + Err(err) => { + warn!("failed to deserialize remote-control client event: {err}"); + continue; + } + } + } + Ok(tungstenite::Message::Pong(_)) => { + pong_deadline + .as_mut() + .reset(tokio::time::Instant::now() + pong_timeout); + continue; + } + Ok(tungstenite::Message::Ping(_)) | Ok(tungstenite::Message::Frame(_)) => continue, + Ok(tungstenite::Message::Binary(_)) => { + warn!("dropping unsupported binary remote-control websocket message"); + continue; + } + Ok(tungstenite::Message::Close(_)) => { + return Err(io::Error::new( + ErrorKind::ConnectionAborted, + "websocket disconnected", + )); + } + Err(err) => { + return Err(io::Error::new( + ErrorKind::InvalidData, + format!("failed to read from websocket: {err}"), + )); + } + }; + + let mut websocket_state = state.lock().await; + if let Some(cursor) = client_envelope.cursor.as_deref() { + websocket_state.subscribe_cursor = Some(cursor.to_string()); + } + if let ClientEvent::Ack = &client_envelope.event + && let Some(acked_seq_id) = client_envelope.seq_id + { + websocket_state + 
.outbound_buffer + .ack(&client_envelope.client_id, acked_seq_id); + } + drop(websocket_state); + + if client_tracker + .handle_message(client_envelope) + .await + .is_err() + { + return Ok(()); + } + } + } +} + +fn set_remote_control_header( + headers: &mut tungstenite::http::HeaderMap, + name: &'static str, + value: &str, +) -> io::Result<()> { + let header_value = HeaderValue::from_str(value).map_err(|err| { + io::Error::new( + ErrorKind::InvalidInput, + format!("invalid remote control header `{name}`: {err}"), + ) + })?; + headers.insert(name, header_value); + Ok(()) +} + +fn build_remote_control_websocket_request( + websocket_url: &str, + enrollment: &RemoteControlEnrollment, + auth: &RemoteControlConnectionAuth, + subscribe_cursor: Option<&str>, +) -> io::Result> { + let mut request = websocket_url.into_client_request().map_err(|err| { + io::Error::new( + ErrorKind::InvalidInput, + format!("invalid remote control websocket URL `{websocket_url}`: {err}"), + ) + })?; + let headers = request.headers_mut(); + set_remote_control_header(headers, "x-codex-server-id", &enrollment.server_id)?; + set_remote_control_header( + headers, + "x-codex-name", + &base64::engine::general_purpose::STANDARD.encode(&enrollment.server_name), + )?; + set_remote_control_header( + headers, + "x-codex-protocol-version", + REMOTE_CONTROL_PROTOCOL_VERSION, + )?; + set_remote_control_header( + headers, + "authorization", + &format!("Bearer {}", auth.bearer_token), + )?; + set_remote_control_header(headers, REMOTE_CONTROL_ACCOUNT_ID_HEADER, &auth.account_id)?; + if let Some(subscribe_cursor) = subscribe_cursor { + set_remote_control_header( + headers, + REMOTE_CONTROL_SUBSCRIBE_CURSOR_HEADER, + subscribe_cursor, + )?; + } + Ok(request) +} + +pub(crate) async fn load_remote_control_auth( + auth_manager: &Arc, +) -> io::Result { + let mut reloaded = false; + let auth = loop { + let Some(auth) = auth_manager.auth().await else { + if reloaded { + return Err(io::Error::new( + 
ErrorKind::PermissionDenied, + "remote control requires ChatGPT authentication", + )); + } + auth_manager.reload(); + reloaded = true; + continue; + }; + if !auth.is_chatgpt_auth() { + break auth; + } + if auth.get_account_id().is_none() && !reloaded { + auth_manager.reload(); + reloaded = true; + continue; + } + break auth; + }; + + if !auth.is_chatgpt_auth() { + return Err(io::Error::new( + ErrorKind::PermissionDenied, + "remote control requires ChatGPT authentication; API key auth is not supported", + )); + } + + Ok(RemoteControlConnectionAuth { + bearer_token: auth.get_token().map_err(io::Error::other)?, + account_id: auth.get_account_id().ok_or_else(|| { + io::Error::new( + ErrorKind::WouldBlock, + "remote control enrollment is waiting for a ChatGPT account id", + ) + })?, + }) +} + +pub(super) async fn connect_remote_control_websocket( + remote_control_target: &RemoteControlTarget, + state_db: Option<&StateRuntime>, + auth_manager: &Arc, + auth_recovery: &mut UnauthorizedRecovery, + enrollment: &mut Option, + subscribe_cursor: Option<&str>, + app_server_client_name: Option<&str>, +) -> io::Result<( + WebSocketStream>, + tungstenite::http::Response<()>, +)> { + ensure_rustls_crypto_provider(); + + let auth = load_remote_control_auth(auth_manager).await?; + let enrollment_account_id = enrollment.as_ref().map(|enrollment| &enrollment.account_id); + if enrollment_account_id.is_some_and(|account_id| account_id != &auth.account_id) { + info!( + "clearing in-memory remote control enrollment because account id changed: websocket_url={}, previous_account_id={:?}, current_account_id={:?}", + remote_control_target.websocket_url, + enrollment + .as_ref() + .map(|enrollment| enrollment.account_id.as_str()), + auth.account_id + ); + *enrollment = None; + } + + if enrollment.is_none() { + *enrollment = load_persisted_remote_control_enrollment( + state_db, + remote_control_target, + &auth.account_id, + app_server_client_name, + ) + .await; + } + + if enrollment.is_none() { + 
info!( + "creating new remote control enrollment: websocket_url={}, enroll_url={}, account_id={}", + remote_control_target.websocket_url, remote_control_target.enroll_url, auth.account_id + ); + let new_enrollment = match enroll_remote_control_server(remote_control_target, &auth).await + { + Ok(new_enrollment) => new_enrollment, + Err(err) + if err.kind() == ErrorKind::PermissionDenied + && recover_remote_control_auth(auth_recovery).await => + { + return Err(io::Error::other(format!( + "{err}; retrying after auth recovery" + ))); + } + Err(err) => return Err(err), + }; + if let Err(err) = update_persisted_remote_control_enrollment( + state_db, + remote_control_target, + &auth.account_id, + app_server_client_name, + Some(&new_enrollment), + ) + .await + { + warn!("failed to persist remote control enrollment in sqlite state db: {err}"); + } + info!( + "created new remote control enrollment: websocket_url={}, account_id={}, server_id={}, environment_id={}", + remote_control_target.websocket_url, + new_enrollment.account_id, + new_enrollment.server_id, + new_enrollment.environment_id + ); + *enrollment = Some(new_enrollment); + } + + let enrollment_ref = enrollment.as_ref().ok_or_else(|| { + io::Error::other("missing remote control enrollment after enrollment step") + })?; + let request = build_remote_control_websocket_request( + &remote_control_target.websocket_url, + enrollment_ref, + &auth, + subscribe_cursor, + )?; + + match connect_async(request).await { + Ok((websocket_stream, response)) => Ok((websocket_stream, response.map(|_| ()))), + Err(err) => { + match &err { + tungstenite::Error::Http(response) if response.status().as_u16() == 404 => { + info!( + "remote control websocket returned HTTP 404; clearing stale enrollment before re-enrolling: websocket_url={}, account_id={}, server_id={}, environment_id={}", + remote_control_target.websocket_url, + auth.account_id, + enrollment_ref.server_id, + enrollment_ref.environment_id + ); + if let Err(clear_err) = 
update_persisted_remote_control_enrollment( + state_db, + remote_control_target, + &auth.account_id, + app_server_client_name, + /*enrollment*/ None, + ) + .await + { + warn!( + "failed to clear stale remote control enrollment in sqlite state db: {clear_err}" + ); + } + *enrollment = None; + } + tungstenite::Error::Http(response) + if matches!(response.status().as_u16(), 401 | 403) => + { + if recover_remote_control_auth(auth_recovery).await { + return Err(io::Error::other(format!( + "remote control websocket auth failed with HTTP {}; retrying after auth recovery", + response.status() + ))); + } + } + _ => {} + } + Err(io::Error::other( + format_remote_control_websocket_connect_error( + &remote_control_target.websocket_url, + &err, + ), + )) + } + } +} + +async fn recover_remote_control_auth(auth_recovery: &mut UnauthorizedRecovery) -> bool { + if !auth_recovery.has_next() { + return false; + } + + let mode = auth_recovery.mode_name(); + let step = auth_recovery.step_name(); + match auth_recovery.next().await { + Ok(step_result) => { + info!( + "remote control websocket auth recovery succeeded: mode={mode}, step={step}, auth_state_changed={:?}", + step_result.auth_state_changed() + ); + true + } + Err(err) => { + warn!("remote control websocket auth recovery failed: mode={mode}, step={step}: {err}"); + false + } + } +} + +fn format_remote_control_websocket_connect_error( + websocket_url: &str, + err: &tungstenite::Error, +) -> String { + let mut message = + format!("failed to connect app-server remote control websocket `{websocket_url}`: {err}"); + let tungstenite::Error::Http(response) = err else { + return message; + }; + + message.push_str(&format!(", {}", format_headers(response.headers()))); + if let Some(body) = response.body().as_ref() + && !body.is_empty() + { + let body_preview = preview_remote_control_response_body(body); + message.push_str(&format!(", body: {body_preview}")); + } + + message +} + +#[cfg(test)] +mod tests { + use super::*; + use 
crate::outgoing_message::OutgoingMessage; + use crate::transport::remote_control::ServerEvent; + use crate::transport::remote_control::protocol::StreamId; + use crate::transport::remote_control::protocol::normalize_remote_control_url; + use chrono::Utc; + use codex_app_server_protocol::AuthMode; + use codex_app_server_protocol::ConfigWarningNotification; + use codex_app_server_protocol::ServerNotification; + use codex_config::types::AuthCredentialsStoreMode; + use codex_core::test_support::auth_manager_from_auth; + use codex_login::AuthDotJson; + use codex_login::CodexAuth; + use codex_login::save_auth; + use codex_login::token_data::TokenData; + use codex_login::token_data::parse_chatgpt_jwt_claims; + use codex_state::StateRuntime; + use futures::StreamExt; + use pretty_assertions::assert_eq; + use std::sync::Arc; + use tempfile::TempDir; + use tokio::io::AsyncBufReadExt; + use tokio::io::AsyncWriteExt; + use tokio::io::BufReader; + use tokio::net::TcpListener; + use tokio::net::TcpStream; + use tokio::sync::mpsc; + use tokio::time::Duration; + use tokio::time::timeout; + use tokio_tungstenite::accept_async; + + async fn remote_control_state_runtime(codex_home: &TempDir) -> Arc { + StateRuntime::init(codex_home.path().to_path_buf(), "test-provider".to_string()) + .await + .expect("state runtime should initialize") + } + + fn remote_control_auth_manager() -> Arc { + auth_manager_from_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing()) + } + + fn remote_control_url_for_listener(listener: &TcpListener) -> String { + format!( + "http://localhost:{}/backend-api/", + listener + .local_addr() + .expect("listener should have a local addr") + .port() + ) + } + + fn remote_control_auth_dot_json(access_token: &str) -> AuthDotJson { + #[derive(serde::Serialize)] + struct Header { + alg: &'static str, + typ: &'static str, + } + + let header = Header { + alg: "none", + typ: "JWT", + }; + let payload = serde_json::json!({ + "email": "user@example.com", + 
"https://api.openai.com/auth": { + "chatgpt_user_id": "user-12345", + "user_id": "user-12345", + "chatgpt_account_id": "account_id" + } + }); + let b64 = |bytes: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(bytes); + let header_b64 = b64(&serde_json::to_vec(&header).expect("header should serialize")); + let payload_b64 = b64(&serde_json::to_vec(&payload).expect("payload should serialize")); + let fake_jwt = format!("{header_b64}.{payload_b64}.sig"); + + AuthDotJson { + auth_mode: Some(AuthMode::Chatgpt), + openai_api_key: None, + tokens: Some(TokenData { + id_token: parse_chatgpt_jwt_claims(&fake_jwt).expect("fake jwt should parse"), + access_token: access_token.to_string(), + refresh_token: "refresh-token".to_string(), + account_id: Some("account_id".to_string()), + }), + last_refresh: Some(Utc::now()), + } + } + + #[tokio::test] + async fn connect_remote_control_websocket_includes_http_error_details() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let expected_error = format!( + "failed to connect app-server remote control websocket `{}`: HTTP error: 503 Service Unavailable, request-id: , cf-ray: , body: upstream unavailable", + remote_control_target.websocket_url + ); + let server_task = tokio::spawn(async move { + let (stream, request_line) = accept_http_request(&listener).await; + assert_eq!( + request_line, + "GET /backend-api/wham/remote/control/server HTTP/1.1" + ); + respond_with_status_and_headers( + stream, + "503 Service Unavailable", + &[("x-trace-id", "trace-503"), ("x-region", "us-east-1")], + "upstream unavailable", + ) + .await; + }); + let codex_home = TempDir::new().expect("temp dir should create"); + let state_db = remote_control_state_runtime(&codex_home).await; + let auth_manager = 
remote_control_auth_manager(); + let mut auth_recovery = auth_manager.unauthorized_recovery(); + let mut enrollment = Some(RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_test".to_string(), + server_id: "srv_e_test".to_string(), + server_name: "test-server".to_string(), + }); + + let err = match connect_remote_control_websocket( + &remote_control_target, + Some(state_db.as_ref()), + &auth_manager, + &mut auth_recovery, + &mut enrollment, + /*subscribe_cursor*/ None, + /*app_server_client_name*/ None, + ) + .await + { + Ok(_) => panic!("http error response should fail the websocket connect"), + Err(err) => err, + }; + + server_task.await.expect("server task should succeed"); + assert_eq!(err.to_string(), expected_error); + } + + #[tokio::test] + async fn connect_remote_control_websocket_recovers_after_unauthorized_reload() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let server_task = tokio::spawn(async move { + let (stream, request_line) = accept_http_request(&listener).await; + assert_eq!( + request_line, + "GET /backend-api/wham/remote/control/server HTTP/1.1" + ); + respond_with_status_and_headers(stream, "401 Unauthorized", &[], "unauthorized").await; + }); + let codex_home = TempDir::new().expect("temp dir should create"); + save_auth( + codex_home.path(), + &remote_control_auth_dot_json("stale-token"), + AuthCredentialsStoreMode::File, + ) + .expect("stale auth should save"); + let state_db = remote_control_state_runtime(&codex_home).await; + let auth_manager = AuthManager::shared( + codex_home.path().to_path_buf(), + /*enable_codex_api_key_env*/ false, + AuthCredentialsStoreMode::File, + ); + let mut auth_recovery = auth_manager.unauthorized_recovery(); + let mut enrollment = 
Some(RemoteControlEnrollment { + account_id: "account_id".to_string(), + environment_id: "env_test".to_string(), + server_id: "srv_e_test".to_string(), + server_name: "test-server".to_string(), + }); + save_auth( + codex_home.path(), + &remote_control_auth_dot_json("fresh-token"), + AuthCredentialsStoreMode::File, + ) + .expect("fresh auth should save"); + + let err = connect_remote_control_websocket( + &remote_control_target, + Some(state_db.as_ref()), + &auth_manager, + &mut auth_recovery, + &mut enrollment, + /*subscribe_cursor*/ None, + /*app_server_client_name*/ None, + ) + .await + .expect_err("unauthorized response should fail the websocket connect"); + + server_task.await.expect("server task should succeed"); + assert_eq!( + err.to_string(), + "remote control websocket auth failed with HTTP 401 Unauthorized; retrying after auth recovery" + ); + assert_eq!( + auth_manager + .auth() + .await + .expect("auth should remain available") + .get_token() + .expect("token should be readable"), + "fresh-token" + ); + } + + #[tokio::test] + async fn connect_remote_control_websocket_recovers_after_unauthorized_enrollment() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let enroll_url = remote_control_target.enroll_url.clone(); + let server_task = tokio::spawn(async move { + let (stream, request_line) = accept_http_request(&listener).await; + assert_eq!( + request_line, + "POST /backend-api/wham/remote/control/server/enroll HTTP/1.1" + ); + respond_with_status_and_headers(stream, "401 Unauthorized", &[], "unauthorized").await; + }); + let codex_home = TempDir::new().expect("temp dir should create"); + save_auth( + codex_home.path(), + &remote_control_auth_dot_json("stale-token"), + AuthCredentialsStoreMode::File, + ) + .expect("stale auth 
should save"); + let state_db = remote_control_state_runtime(&codex_home).await; + let auth_manager = AuthManager::shared( + codex_home.path().to_path_buf(), + /*enable_codex_api_key_env*/ false, + AuthCredentialsStoreMode::File, + ); + let mut auth_recovery = auth_manager.unauthorized_recovery(); + let mut enrollment = None; + save_auth( + codex_home.path(), + &remote_control_auth_dot_json("fresh-token"), + AuthCredentialsStoreMode::File, + ) + .expect("fresh auth should save"); + + let err = connect_remote_control_websocket( + &remote_control_target, + Some(state_db.as_ref()), + &auth_manager, + &mut auth_recovery, + &mut enrollment, + /*subscribe_cursor*/ None, + /*app_server_client_name*/ None, + ) + .await + .expect_err("unauthorized enrollment should fail the websocket connect"); + + server_task.await.expect("server task should succeed"); + assert_eq!( + err.to_string(), + format!( + "remote control server enrollment failed at `{enroll_url}`: HTTP 401 Unauthorized, request-id: , cf-ray: , body: unauthorized; retrying after auth recovery" + ) + ); + assert_eq!( + auth_manager + .auth() + .await + .expect("auth should remain available") + .get_token() + .expect("token should be readable"), + "fresh-token" + ); + } + + #[tokio::test] + async fn run_remote_control_websocket_loop_shutdown_cancels_reconnect_backoff() { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let remote_control_url = remote_control_url_for_listener(&listener); + drop(listener); + + let remote_control_target = + normalize_remote_control_url(&remote_control_url).expect("target should parse"); + let (transport_event_tx, transport_event_rx) = mpsc::channel(1); + drop(transport_event_rx); + let shutdown_token = CancellationToken::new(); + let (_enabled_tx, enabled_rx) = watch::channel(true); + let websocket_task = tokio::spawn({ + let shutdown_token = shutdown_token.clone(); + async move { + RemoteControlWebsocket::new( + remote_control_url, + 
Some(remote_control_target), + /*state_db*/ None, + remote_control_auth_manager(), + transport_event_tx, + shutdown_token, + enabled_rx, + ) + .run(/*app_server_client_name_rx*/ None) + .await + } + }); + + tokio::time::sleep(Duration::from_millis(50)).await; + shutdown_token.cancel(); + + timeout(Duration::from_millis(100), websocket_task) + .await + .expect("shutdown should cancel reconnect backoff") + .expect("websocket task should join"); + } + + #[tokio::test] + async fn run_server_writer_inner_sends_periodic_ping_frames() { + let (client_stream, mut server_stream) = connected_websocket_pair().await; + let (websocket_writer, _websocket_reader) = client_stream.split(); + let (outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let state = Arc::new(Mutex::new(WebsocketState { + outbound_buffer, + subscribe_cursor: None, + next_seq_id: 0, + })); + let (_server_event_tx, server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY); + let server_event_rx = Arc::new(Mutex::new(server_event_rx)); + let shutdown_token = CancellationToken::new(); + let writer_task = tokio::spawn(RemoteControlWebsocket::run_server_writer_inner( + state, + server_event_rx, + used_rx, + websocket_writer, + Duration::from_millis(20), + shutdown_token.clone(), + )); + + let message = timeout(Duration::from_secs(5), server_stream.next()) + .await + .expect("ping frame should arrive in time") + .expect("server websocket should stay open") + .expect("ping frame should read"); + assert!(matches!(message, tungstenite::Message::Ping(_))); + + shutdown_token.cancel(); + writer_task + .await + .expect("writer task should join") + .expect("writer should stop cleanly"); + } + + #[tokio::test] + async fn run_websocket_reader_inner_times_out_without_pong_frames() { + let (client_stream, _server_stream) = connected_websocket_pair().await; + let (_websocket_writer, websocket_reader) = client_stream.split(); + let (outbound_buffer, _used_rx) = BoundedOutboundBuffer::new(); + let state = 
Arc::new(Mutex::new(WebsocketState { + outbound_buffer, + subscribe_cursor: None, + next_seq_id: 0, + })); + let (server_event_tx, _server_event_rx) = mpsc::channel(super::super::CHANNEL_CAPACITY); + let (transport_event_tx, _transport_event_rx) = + mpsc::channel(super::super::CHANNEL_CAPACITY); + let shutdown_token = CancellationToken::new(); + let client_tracker = Arc::new(Mutex::new(ClientTracker::new( + server_event_tx, + transport_event_tx, + &shutdown_token, + ))); + + let err = timeout( + Duration::from_secs(5), + RemoteControlWebsocket::run_websocket_reader_inner( + client_tracker, + state, + websocket_reader, + Duration::from_millis(100), + shutdown_token, + ), + ) + .await + .expect("reader should time out waiting for pong") + .expect_err("missing pong should fail the websocket reader"); + + assert_eq!(err.kind(), ErrorKind::TimedOut); + assert_eq!(err.to_string(), "remote control websocket pong timeout"); + } + + #[test] + fn outbound_buffer_acks_by_client_id_across_stream_ids() { + let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let client_1 = ClientId("client-1".to_string()); + let client_2 = ClientId("client-2".to_string()); + + outbound_buffer.insert(&server_envelope( + &client_1, + "stream-1", + /*seq_id*/ 0, + "first-client-old-stream", + )); + outbound_buffer.insert(&server_envelope( + &client_2, + "stream-1", + /*seq_id*/ 1, + "second-client", + )); + outbound_buffer.insert(&server_envelope( + &client_1, + "stream-2", + /*seq_id*/ 2, + "first-client-new-stream", + )); + + outbound_buffer.ack(&client_1, /*acked_seq_id*/ 2); + + let mut retained = outbound_buffer + .server_envelopes() + .map(|server_envelope| { + ( + server_envelope.client_id.0.as_str(), + server_envelope.stream_id.0.as_str(), + server_envelope.seq_id, + ) + }) + .collect::>(); + retained.sort_unstable(); + assert_eq!(retained, vec![("client-2", "stream-1", 1)]); + assert_eq!(*used_rx.borrow(), 1); + } + + #[test] + fn 
outbound_buffer_retains_unacked_messages_until_ack_advances() { + let (mut outbound_buffer, used_rx) = BoundedOutboundBuffer::new(); + let client_1 = ClientId("client-1".to_string()); + let client_2 = ClientId("client-2".to_string()); + + outbound_buffer.insert(&server_envelope( + &client_1, + "stream-1", + /*seq_id*/ 0, + "first-old", + )); + outbound_buffer.insert(&server_envelope( + &client_1, + "stream-2", + /*seq_id*/ 1, + "first-new", + )); + outbound_buffer.insert(&server_envelope( + &client_2, "stream-1", /*seq_id*/ 2, "second", + )); + + outbound_buffer.ack(&client_1, /*acked_seq_id*/ 0); + + let mut retained = outbound_buffer + .server_envelopes() + .map(|server_envelope| { + ( + server_envelope.client_id.0.as_str(), + server_envelope.stream_id.0.as_str(), + server_envelope.seq_id, + ) + }) + .collect::>(); + retained.sort_unstable(); + assert_eq!( + retained, + vec![("client-1", "stream-2", 1), ("client-2", "stream-1", 2)] + ); + assert_eq!(*used_rx.borrow(), 2); + } + + fn server_envelope( + client_id: &ClientId, + stream_id: &str, + seq_id: u64, + summary: &str, + ) -> ServerEnvelope { + ServerEnvelope { + event: ServerEvent::ServerMessage { + message: Box::new(OutgoingMessage::AppServerNotification( + ServerNotification::ConfigWarning(ConfigWarningNotification { + summary: summary.to_string(), + details: None, + path: None, + range: None, + }), + )), + }, + client_id: client_id.clone(), + stream_id: StreamId(stream_id.to_string()), + seq_id, + } + } + + async fn accept_http_request(listener: &TcpListener) -> (TcpStream, String) { + let (stream, _) = timeout(Duration::from_secs(5), listener.accept()) + .await + .expect("HTTP request should arrive in time") + .expect("listener accept should succeed"); + let mut reader = BufReader::new(stream); + + let mut request_line = String::new(); + reader + .read_line(&mut request_line) + .await + .expect("request line should read"); + loop { + let mut line = String::new(); + reader + .read_line(&mut line) + .await 
+ .expect("header line should read"); + if line == "\r\n" { + break; + } + } + + ( + reader.into_inner(), + request_line.trim_end_matches("\r\n").to_string(), + ) + } + + async fn connected_websocket_pair() -> ( + WebSocketStream>, + WebSocketStream, + ) { + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("listener should bind"); + let connect_task = tokio::spawn(connect_async(format!( + "ws://{}", + listener + .local_addr() + .expect("listener should have a local addr") + ))); + let (server_stream, _) = listener + .accept() + .await + .expect("server should accept client"); + let server_stream = accept_async(server_stream) + .await + .expect("server websocket handshake should succeed"); + let (client_stream, _) = connect_task + .await + .expect("client connect task should join") + .expect("client websocket handshake should succeed"); + + (client_stream, server_stream) + } + + async fn respond_with_status_and_headers( + mut stream: TcpStream, + status: &str, + headers: &[(&str, &str)], + body: &str, + ) { + let extra_headers = headers + .iter() + .map(|(name, value)| format!("{name}: {value}\r\n")) + .collect::(); + let response = format!( + "HTTP/1.1 {status}\r\ncontent-type: text/plain\r\ncontent-length: {}\r\nconnection: close\r\n{extra_headers}\r\n{body}", + body.len(), + ); + stream + .write_all(response.as_bytes()) + .await + .expect("response should write"); + stream.flush().await.expect("response should flush"); + } +} diff --git a/codex-rs/app-server/src/transport/stdio.rs b/codex-rs/app-server/src/transport/stdio.rs index 4f2bf26745..20eab025fb 100644 --- a/codex-rs/app-server/src/transport/stdio.rs +++ b/codex-rs/app-server/src/transport/stdio.rs @@ -1,9 +1,12 @@ use super::CHANNEL_CAPACITY; use super::TransportEvent; use super::forward_incoming_message; +use super::next_connection_id; use super::serialize_outgoing_message; -use crate::outgoing_message::ConnectionId; use crate::outgoing_message::QueuedOutgoingMessage; +use 
codex_app_server_protocol::InitializeParams; +use codex_app_server_protocol::JSONRPCMessage; +use codex_app_server_protocol::JSONRPCRequest; use std::io::ErrorKind; use std::io::Result as IoResult; use tokio::io; @@ -11,6 +14,7 @@ use tokio::io::AsyncBufReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufReader; use tokio::sync::mpsc; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use tracing::debug; use tracing::error; @@ -19,8 +23,9 @@ use tracing::info; pub(crate) async fn start_stdio_connection( transport_event_tx: mpsc::Sender, stdio_handles: &mut Vec>, + initialize_client_name_tx: oneshot::Sender, ) -> IoResult<()> { - let connection_id = ConnectionId(0); + let connection_id = next_connection_id(); let (writer_tx, mut writer_rx) = mpsc::channel::(CHANNEL_CAPACITY); let writer_tx_for_reader = writer_tx.clone(); transport_event_tx @@ -37,10 +42,16 @@ pub(crate) async fn start_stdio_connection( let stdin = io::stdin(); let reader = BufReader::new(stdin); let mut lines = reader.lines(); + let mut initialize_client_name_tx = Some(initialize_client_name_tx); loop { match lines.next_line().await { Ok(Some(line)) => { + if let Some(client_name) = stdio_initialize_client_name(&line) + && let Some(initialize_client_name_tx) = initialize_client_name_tx.take() + { + let _ = initialize_client_name_tx.send(client_name); + } if !forward_incoming_message( &transport_event_tx_for_reader, &writer_tx_for_reader, @@ -86,3 +97,15 @@ pub(crate) async fn start_stdio_connection( Ok(()) } + +fn stdio_initialize_client_name(line: &str) -> Option { + let message = serde_json::from_str::(line).ok()?; + let JSONRPCMessage::Request(JSONRPCRequest { method, params, .. 
}) = message else { + return None; + }; + if method != "initialize" { + return None; + } + let params = serde_json::from_value::(params?).ok()?; + Some(params.client_info.name) +} diff --git a/codex-rs/app-server/src/transport/websocket.rs b/codex-rs/app-server/src/transport/websocket.rs index 05dfe24b05..41b138e216 100644 --- a/codex-rs/app-server/src/transport/websocket.rs +++ b/codex-rs/app-server/src/transport/websocket.rs @@ -4,6 +4,7 @@ use super::auth::WebsocketAuthPolicy; use super::auth::authorize_upgrade; use super::auth::should_warn_about_unauthenticated_non_loopback_listener; use super::forward_incoming_message; +use super::next_connection_id; use super::serialize_outgoing_message; use crate::outgoing_message::ConnectionId; use crate::outgoing_message::QueuedOutgoingMessage; @@ -32,8 +33,6 @@ use owo_colors::Style; use std::io::Result as IoResult; use std::net::SocketAddr; use std::sync::Arc; -use std::sync::atomic::AtomicU64; -use std::sync::atomic::Ordering; use tokio::net::TcpListener; use tokio::sync::mpsc; use tokio::task::JoinHandle; @@ -75,7 +74,6 @@ fn print_websocket_startup_banner(addr: SocketAddr) { #[derive(Clone)] struct WebSocketListenerState { transport_event_tx: mpsc::Sender, - connection_counter: Arc, auth_policy: Arc, } @@ -113,7 +111,7 @@ async fn websocket_upgrade_handler( ); return (err.status_code(), err.message()).into_response(); } - let connection_id = ConnectionId(state.connection_counter.fetch_add(1, Ordering::Relaxed)); + let connection_id = next_connection_id(); info!(%peer_addr, "websocket client connected"); websocket .on_upgrade(move |stream| async move { @@ -146,7 +144,6 @@ pub(crate) async fn start_websocket_acceptor( .layer(middleware::from_fn(reject_requests_with_origin_header)) .with_state(WebSocketListenerState { transport_event_tx, - connection_counter: Arc::new(AtomicU64::new(1)), auth_policy: Arc::new(auth_policy), }); let server = axum::serve( diff --git a/codex-rs/app-server/tests/common/Cargo.toml 
b/codex-rs/app-server/tests/common/Cargo.toml index 6eb1a2c588..aef2f58dff 100644 --- a/codex-rs/app-server/tests/common/Cargo.toml +++ b/codex-rs/app-server/tests/common/Cargo.toml @@ -15,6 +15,7 @@ anyhow = { workspace = true } base64 = { workspace = true } chrono = { workspace = true } codex-app-server-protocol = { workspace = true } +codex-config = { workspace = true } codex-core = { workspace = true } codex-features = { workspace = true } codex-login = { workspace = true } diff --git a/codex-rs/app-server/tests/common/auth_fixtures.rs b/codex-rs/app-server/tests/common/auth_fixtures.rs index dfda247254..99334f0770 100644 --- a/codex-rs/app-server/tests/common/auth_fixtures.rs +++ b/codex-rs/app-server/tests/common/auth_fixtures.rs @@ -7,7 +7,7 @@ use base64::engine::general_purpose::URL_SAFE_NO_PAD; use chrono::DateTime; use chrono::Utc; use codex_app_server_protocol::AuthMode; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::AuthDotJson; use codex_login::save_auth; use codex_login::token_data::TokenData; diff --git a/codex-rs/app-server/tests/common/mcp_process.rs b/codex-rs/app-server/tests/common/mcp_process.rs index 28e48dfdaf..e660b82646 100644 --- a/codex-rs/app-server/tests/common/mcp_process.rs +++ b/codex-rs/app-server/tests/common/mcp_process.rs @@ -47,6 +47,7 @@ use codex_app_server_protocol::JSONRPCRequest; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::ListMcpServerStatusParams; use codex_app_server_protocol::LoginAccountParams; +use codex_app_server_protocol::McpResourceReadParams; use codex_app_server_protocol::MockExperimentalMethodParams; use codex_app_server_protocol::ModelListParams; use codex_app_server_protocol::PluginInstallParams; @@ -482,6 +483,15 @@ impl McpProcess { self.send_request("app/list", params).await } + /// Send an `mcpServer/resource/read` JSON-RPC request. 
+ pub async fn send_mcp_resource_read_request( + &mut self, + params: McpResourceReadParams, + ) -> anyhow::Result { + let params = Some(serde_json::to_value(params)?); + self.send_request("mcpServer/resource/read", params).await + } + /// Send a `skills/list` JSON-RPC request. pub async fn send_skills_list_request( &mut self, diff --git a/codex-rs/app-server/tests/common/rollout.rs b/codex-rs/app-server/tests/common/rollout.rs index b67390154e..efbed53255 100644 --- a/codex-rs/app-server/tests/common/rollout.rs +++ b/codex-rs/app-server/tests/common/rollout.rs @@ -84,6 +84,7 @@ pub fn create_fake_rollout_with_source( agent_role: None, model_provider: model_provider.map(str::to_string), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; @@ -167,6 +168,7 @@ pub fn create_fake_rollout_with_text_elements( agent_role: None, model_provider: model_provider.map(str::to_string), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; diff --git a/codex-rs/app-server/tests/suite/auth.rs b/codex-rs/app-server/tests/suite/auth.rs index 78572d8ce1..e6134e4802 100644 --- a/codex-rs/app-server/tests/suite/auth.rs +++ b/codex-rs/app-server/tests/suite/auth.rs @@ -12,7 +12,7 @@ use codex_app_server_protocol::JSONRPCError; use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::LoginAccountResponse; use codex_app_server_protocol::RequestId; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; use pretty_assertions::assert_eq; use std::path::Path; diff --git a/codex-rs/app-server/tests/suite/v2/account.rs b/codex-rs/app-server/tests/suite/v2/account.rs index 0755c37553..3c88bcb7a4 100644 --- a/codex-rs/app-server/tests/suite/v2/account.rs +++ b/codex-rs/app-server/tests/suite/v2/account.rs @@ -28,7 +28,7 @@ use codex_app_server_protocol::ServerNotification; use 
codex_app_server_protocol::ServerRequest; use codex_app_server_protocol::TurnCompletedNotification; use codex_app_server_protocol::TurnStatus; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::login_with_api_key; use codex_protocol::account::PlanType as AccountPlanType; use core_test_support::responses; diff --git a/codex-rs/app-server/tests/suite/v2/analytics.rs b/codex-rs/app-server/tests/suite/v2/analytics.rs index 8e8e328a84..a4d7a7f349 100644 --- a/codex-rs/app-server/tests/suite/v2/analytics.rs +++ b/codex-rs/app-server/tests/suite/v2/analytics.rs @@ -2,10 +2,10 @@ use anyhow::Result; use app_test_support::ChatGptAuthFixture; use app_test_support::DEFAULT_CLIENT_NAME; use app_test_support::write_chatgpt_auth; +use codex_config::types::AuthCredentialsStoreMode; use codex_config::types::OtelExporterKind; use codex_config::types::OtelHttpProtocol; use codex_core::config::ConfigBuilder; -use codex_login::AuthCredentialsStoreMode; use pretty_assertions::assert_eq; use serde_json::Value; use std::collections::HashMap; diff --git a/codex-rs/app-server/tests/suite/v2/app_list.rs b/codex-rs/app-server/tests/suite/v2/app_list.rs index d8cedf2a41..57a27961af 100644 --- a/codex-rs/app-server/tests/suite/v2/app_list.rs +++ b/codex-rs/app-server/tests/suite/v2/app_list.rs @@ -35,7 +35,7 @@ use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ServerNotification; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::AuthDotJson; use codex_login::save_auth; use pretty_assertions::assert_eq; diff --git a/codex-rs/app-server/tests/suite/v2/compaction.rs b/codex-rs/app-server/tests/suite/v2/compaction.rs index 8849b39abb..e7661546ac 100644 --- a/codex-rs/app-server/tests/suite/v2/compaction.rs +++ 
b/codex-rs/app-server/tests/suite/v2/compaction.rs @@ -28,7 +28,7 @@ use codex_app_server_protocol::TurnCompletedNotification; use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::UserInput as V2UserInput; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseItem; use core_test_support::responses; diff --git a/codex-rs/app-server/tests/suite/v2/config_rpc.rs b/codex-rs/app-server/tests/suite/v2/config_rpc.rs index 23c9a6c6c2..99e33eb6b9 100644 --- a/codex-rs/app-server/tests/suite/v2/config_rpc.rs +++ b/codex-rs/app-server/tests/suite/v2/config_rpc.rs @@ -546,7 +546,7 @@ model = "gpt-old" ) .await??; let write: ConfigWriteResponse = to_response(write_resp)?; - let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?; + let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home); assert_eq!(write.status, WriteStatus::Ok); assert_eq!(write.file_path, expected_file_path); @@ -647,7 +647,7 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> { .await??; let batch_write: ConfigWriteResponse = to_response(batch_resp)?; assert_eq!(batch_write.status, WriteStatus::Ok); - let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home)?; + let expected_file_path = AbsolutePathBuf::resolve_path_against_base("config.toml", codex_home); assert_eq!(batch_write.file_path, expected_file_path); let read_id = mcp diff --git a/codex-rs/app-server/tests/suite/v2/fs.rs b/codex-rs/app-server/tests/suite/v2/fs.rs index e113904809..3fd5d62c89 100644 --- a/codex-rs/app-server/tests/suite/v2/fs.rs +++ b/codex-rs/app-server/tests/suite/v2/fs.rs @@ -21,8 +21,6 @@ use std::path::PathBuf; use tempfile::TempDir; use tokio::time::Duration; use tokio::time::timeout; -use uuid::Uuid; 
-use uuid::Version; #[cfg(unix)] use std::os::unix::fs::symlink; @@ -628,8 +626,10 @@ async fn fs_watch_directory_reports_changed_child_paths_and_unwatch_stops_notifi std::fs::write(&fetch_head, "old\n")?; let mut mcp = initialized_mcp(&codex_home).await?; + let watch_id = "watch-git-dir".to_string(); let watch_request_id = mcp .send_fs_watch_request(codex_app_server_protocol::FsWatchParams { + watch_id: watch_id.clone(), path: absolute_path(git_dir.clone()), }) .await?; @@ -641,8 +641,6 @@ async fn fs_watch_directory_reports_changed_child_paths_and_unwatch_stops_notifi .await??, )?; assert_eq!(watch_response.path, absolute_path(git_dir.clone())); - let watch_id = Uuid::parse_str(&watch_response.watch_id)?; - assert_eq!(watch_id.get_version(), Some(Version::SortRand)); std::fs::write(&fetch_head, "updated\n")?; @@ -650,7 +648,7 @@ async fn fs_watch_directory_reports_changed_child_paths_and_unwatch_stops_notifi // Keep validating notification shape when the backend does emit, but do not // fail the whole suite if no OS event arrives. if let Some(changed) = maybe_fs_changed_notification(&mut mcp).await? 
{ - assert_eq!(changed.watch_id, watch_response.watch_id.clone()); + assert_eq!(changed.watch_id, watch_id.clone()); assert_eq!( changed.changed_paths, vec![absolute_path(fetch_head.clone())] @@ -665,9 +663,7 @@ async fn fs_watch_directory_reports_changed_child_paths_and_unwatch_stops_notifi {} let unwatch_request_id = mcp - .send_fs_unwatch_request(FsUnwatchParams { - watch_id: watch_response.watch_id, - }) + .send_fs_unwatch_request(FsUnwatchParams { watch_id }) .await?; timeout( DEFAULT_READ_TIMEOUT, @@ -698,8 +694,10 @@ async fn fs_watch_file_reports_atomic_replace_events() -> Result<()> { std::fs::write(&head_path, "ref: refs/heads/main\n")?; let mut mcp = initialized_mcp(&codex_home).await?; + let watch_id = "watch-head".to_string(); let watch_request_id = mcp .send_fs_watch_request(codex_app_server_protocol::FsWatchParams { + watch_id: watch_id.clone(), path: absolute_path(head_path.clone()), }) .await?; @@ -718,7 +716,7 @@ async fn fs_watch_file_reports_atomic_replace_events() -> Result<()> { assert_eq!( changed, FsChangedNotification { - watch_id: watch_response.watch_id, + watch_id, changed_paths: vec![absolute_path(head_path.clone())], } ); @@ -735,8 +733,10 @@ async fn fs_watch_allows_missing_file_targets() -> Result<()> { std::fs::create_dir_all(&git_dir)?; let mut mcp = initialized_mcp(&codex_home).await?; + let watch_id = "watch-fetch-head".to_string(); let watch_request_id = mcp .send_fs_watch_request(codex_app_server_protocol::FsWatchParams { + watch_id: watch_id.clone(), path: absolute_path(fetch_head.clone()), }) .await?; @@ -755,7 +755,7 @@ async fn fs_watch_allows_missing_file_targets() -> Result<()> { assert_eq!( changed, FsChangedNotification { - watch_id: watch_response.watch_id, + watch_id, changed_paths: vec![absolute_path(fetch_head.clone())], } ); @@ -770,7 +770,10 @@ async fn fs_watch_rejects_relative_paths() -> Result<()> { let mut mcp = initialized_mcp(&codex_home).await?; let watch_id = mcp - .send_raw_request("fs/watch", 
Some(json!({ "path": "relative-path" }))) + .send_raw_request( + "fs/watch", + Some(json!({ "watchId": "watch-relative", "path": "relative-path" })), + ) .await?; expect_error_message( &mut mcp, diff --git a/codex-rs/app-server/tests/suite/v2/mcp_resource.rs b/codex-rs/app-server/tests/suite/v2/mcp_resource.rs new file mode 100644 index 0000000000..0a38f3b37e --- /dev/null +++ b/codex-rs/app-server/tests/suite/v2/mcp_resource.rs @@ -0,0 +1,218 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use app_test_support::ChatGptAuthFixture; +use app_test_support::McpProcess; +use app_test_support::to_response; +use app_test_support::write_chatgpt_auth; +use axum::Router; +use codex_app_server_protocol::JSONRPCError; +use codex_app_server_protocol::JSONRPCResponse; +use codex_app_server_protocol::McpResourceContent; +use codex_app_server_protocol::McpResourceReadParams; +use codex_app_server_protocol::McpResourceReadResponse; +use codex_app_server_protocol::RequestId; +use codex_app_server_protocol::ThreadStartParams; +use codex_app_server_protocol::ThreadStartResponse; +use codex_config::types::AuthCredentialsStoreMode; +use core_test_support::responses; +use pretty_assertions::assert_eq; +use rmcp::handler::server::ServerHandler; +use rmcp::model::ProtocolVersion; +use rmcp::model::ReadResourceRequestParams; +use rmcp::model::ReadResourceResult; +use rmcp::model::ResourceContents; +use rmcp::model::ServerCapabilities; +use rmcp::model::ServerInfo; +use rmcp::service::RequestContext; +use rmcp::service::RoleServer; +use rmcp::transport::StreamableHttpServerConfig; +use rmcp::transport::StreamableHttpService; +use rmcp::transport::streamable_http_server::session::local::LocalSessionManager; +use tempfile::TempDir; +use tokio::net::TcpListener; +use tokio::time::timeout; + +const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(10); +const TEST_RESOURCE_URI: &str = "test://codex/resource"; +const TEST_BLOB_RESOURCE_URI: &str = 
"test://codex/resource.bin"; +const TEST_RESOURCE_BLOB: &str = "YmluYXJ5LXJlc291cmNl"; +const TEST_RESOURCE_TEXT: &str = "Resource body from the MCP server."; + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn mcp_resource_read_returns_resource_contents() -> Result<()> { + let responses_server = responses::start_mock_server().await; + let listener = TcpListener::bind("127.0.0.1:0").await?; + let addr = listener.local_addr()?; + let apps_server_url = format!("http://{addr}"); + + let mcp_service = StreamableHttpService::new( + move || Ok(ResourceAppsMcpServer), + Arc::new(LocalSessionManager::default()), + StreamableHttpServerConfig::default(), + ); + let router = Router::new().nest_service("/api/codex/apps", mcp_service); + let apps_server_handle = tokio::spawn(async move { + let _ = axum::serve(listener, router).await; + }); + + let codex_home = TempDir::new()?; + let responses_server_uri = responses_server.uri(); + std::fs::write( + codex_home.path().join("config.toml"), + format!( + r#" +model = "mock-model" +approval_policy = "untrusted" +sandbox_mode = "read-only" + +model_provider = "mock_provider" +chatgpt_base_url = "{apps_server_url}" +mcp_oauth_credentials_store = "file" + +[features] +apps = true + +[model_providers.mock_provider] +name = "Mock provider for test" +base_url = "{responses_server_uri}/v1" +wire_api = "responses" +request_max_retries = 0 +stream_max_retries = 0 +"# + ), + )?; + write_chatgpt_auth( + codex_home.path(), + ChatGptAuthFixture::new("chatgpt-token") + .account_id("account-123") + .chatgpt_user_id("user-123") + .chatgpt_account_id("account-123"), + AuthCredentialsStoreMode::File, + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let thread_start_id = mcp + .send_thread_start_request(ThreadStartParams { + model: Some("mock-model".to_string()), + ..Default::default() + }) + .await?; + let thread_start_resp: JSONRPCResponse = timeout( 
+ DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_start_id)), + ) + .await??; + let ThreadStartResponse { thread, .. } = to_response(thread_start_resp)?; + + let read_request_id = mcp + .send_mcp_resource_read_request(McpResourceReadParams { + thread_id: thread.id, + server: "codex_apps".to_string(), + uri: TEST_RESOURCE_URI.to_string(), + }) + .await?; + let read_response: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(read_request_id)), + ) + .await??; + + assert_eq!( + to_response::(read_response)?, + McpResourceReadResponse { + contents: vec![ + McpResourceContent::Text { + uri: TEST_RESOURCE_URI.to_string(), + mime_type: Some("text/markdown".to_string()), + text: TEST_RESOURCE_TEXT.to_string(), + meta: None, + }, + McpResourceContent::Blob { + uri: TEST_BLOB_RESOURCE_URI.to_string(), + mime_type: Some("application/octet-stream".to_string()), + blob: TEST_RESOURCE_BLOB.to_string(), + meta: None, + }, + ], + } + ); + + apps_server_handle.abort(); + let _ = apps_server_handle.await; + Ok(()) +} + +#[tokio::test] +async fn mcp_resource_read_returns_error_for_unknown_thread() -> Result<()> { + let codex_home = TempDir::new()?; + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_mcp_resource_read_request(McpResourceReadParams { + thread_id: "00000000-0000-4000-8000-000000000000".to_string(), + server: "codex_apps".to_string(), + uri: TEST_RESOURCE_URI.to_string(), + }) + .await?; + let error: JSONRPCError = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_error_message(RequestId::Integer(request_id)), + ) + .await??; + + assert!( + error.error.message.contains("thread not found"), + "expected thread-not-found error, got: {error:?}" + ); + + Ok(()) +} + +#[derive(Clone, Default)] +struct ResourceAppsMcpServer; + +impl ServerHandler for ResourceAppsMcpServer { 
+ fn get_info(&self) -> ServerInfo { + ServerInfo { + protocol_version: ProtocolVersion::V_2025_06_18, + capabilities: ServerCapabilities::builder().enable_resources().build(), + ..ServerInfo::default() + } + } + + async fn read_resource( + &self, + request: ReadResourceRequestParams, + _context: RequestContext, + ) -> Result { + let uri = request.uri; + if uri != TEST_RESOURCE_URI { + return Err(rmcp::ErrorData::resource_not_found( + format!("resource not found: {uri}"), + None, + )); + } + + Ok(ReadResourceResult { + contents: vec![ + ResourceContents::TextResourceContents { + uri: TEST_RESOURCE_URI.to_string(), + mime_type: Some("text/markdown".to_string()), + text: TEST_RESOURCE_TEXT.to_string(), + meta: None, + }, + ResourceContents::BlobResourceContents { + uri: TEST_BLOB_RESOURCE_URI.to_string(), + mime_type: Some("application/octet-stream".to_string()), + blob: TEST_RESOURCE_BLOB.to_string(), + meta: None, + }, + ], + }) + } +} diff --git a/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs b/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs index 7a50092b99..6326677250 100644 --- a/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs +++ b/codex-rs/app-server/tests/suite/v2/mcp_server_elicitation.rs @@ -31,7 +31,7 @@ use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput as V2UserInput; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use core_test_support::responses; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; diff --git a/codex-rs/app-server/tests/suite/v2/mcp_server_status.rs b/codex-rs/app-server/tests/suite/v2/mcp_server_status.rs index 8b3844b521..e637be237d 100644 --- a/codex-rs/app-server/tests/suite/v2/mcp_server_status.rs +++ b/codex-rs/app-server/tests/suite/v2/mcp_server_status.rs @@ -12,15 +12,20 @@ use 
app_test_support::write_mock_responses_config_toml; use axum::Router; use codex_app_server_protocol::ListMcpServerStatusParams; use codex_app_server_protocol::ListMcpServerStatusResponse; +use codex_app_server_protocol::McpServerStatusDetail; use codex_app_server_protocol::RequestId; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; use rmcp::model::JsonObject; +use rmcp::model::ListResourceTemplatesResult; +use rmcp::model::ListResourcesResult; use rmcp::model::ListToolsResult; +use rmcp::model::PaginatedRequestParams; use rmcp::model::ServerCapabilities; use rmcp::model::ServerInfo; use rmcp::model::Tool; use rmcp::model::ToolAnnotations; +use rmcp::service::RequestContext; use rmcp::transport::StreamableHttpServerConfig; use rmcp::transport::StreamableHttpService; use rmcp::transport::streamable_http_server::session::local::LocalSessionManager; @@ -64,6 +69,7 @@ url = "{mcp_server_url}/mcp" .send_list_mcp_server_status_request(ListMcpServerStatusParams { cursor: None, limit: None, + detail: None, }) .await?; let response = timeout( @@ -127,6 +133,133 @@ impl ServerHandler for McpStatusServer { } } +#[derive(Clone)] +struct SlowInventoryServer { + tool_name: Arc, +} + +impl ServerHandler for SlowInventoryServer { + fn get_info(&self) -> ServerInfo { + ServerInfo { + capabilities: ServerCapabilities::builder() + .enable_tools() + .enable_resources() + .build(), + ..ServerInfo::default() + } + } + + async fn list_tools( + &self, + _request: Option, + _context: RequestContext, + ) -> Result { + let input_schema: JsonObject = serde_json::from_value(json!({ + "type": "object", + "additionalProperties": false + })) + .map_err(|err| rmcp::ErrorData::internal_error(err.to_string(), None))?; + + let mut tool = Tool::new( + Cow::Owned(self.tool_name.as_ref().clone()), + Cow::Borrowed("Look up test data."), + Arc::new(input_schema), + ); + tool.annotations = Some(ToolAnnotations::new().read_only(true)); + + Ok(ListToolsResult { + tools: vec![tool], 
+ next_cursor: None, + meta: None, + }) + } + + async fn list_resources( + &self, + _request: Option, + _context: RequestContext, + ) -> Result { + tokio::time::sleep(Duration::from_secs(2)).await; + Ok(ListResourcesResult { + resources: Vec::new(), + next_cursor: None, + meta: None, + }) + } + + async fn list_resource_templates( + &self, + _request: Option, + _context: RequestContext, + ) -> Result { + tokio::time::sleep(Duration::from_secs(2)).await; + Ok(ListResourceTemplatesResult { + resource_templates: Vec::new(), + next_cursor: None, + meta: None, + }) + } +} + +#[tokio::test] +async fn mcp_server_status_list_tools_and_auth_only_skips_slow_inventory_calls() -> Result<()> { + let server = create_mock_responses_server_sequence_unchecked(Vec::new()).await; + let (mcp_server_url, mcp_server_handle) = start_slow_inventory_mcp_server("lookup").await?; + let codex_home = TempDir::new()?; + write_mock_responses_config_toml( + codex_home.path(), + &server.uri(), + &BTreeMap::new(), + /*auto_compact_limit*/ 1024, + /*requires_openai_auth*/ None, + "mock_provider", + "compact", + )?; + + let config_path = codex_home.path().join("config.toml"); + let mut config_toml = std::fs::read_to_string(&config_path)?; + config_toml.push_str(&format!( + r#" +[mcp_servers.some-server] +url = "{mcp_server_url}/mcp" +"# + )); + std::fs::write(config_path, config_toml)?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let request_id = mcp + .send_list_mcp_server_status_request(ListMcpServerStatusParams { + cursor: None, + limit: None, + detail: Some(McpServerStatusDetail::ToolsAndAuthOnly), + }) + .await?; + let response = timeout( + Duration::from_millis(500), + mcp.read_stream_until_response_message(RequestId::Integer(request_id)), + ) + .await??; + let response: ListMcpServerStatusResponse = to_response(response)?; + + assert_eq!(response.next_cursor, None); + assert_eq!(response.data.len(), 1); + let status 
= &response.data[0]; + assert_eq!(status.name, "some-server"); + assert_eq!( + status.tools.keys().cloned().collect::>(), + BTreeSet::from(["lookup".to_string()]) + ); + assert_eq!(status.resources, Vec::new()); + assert_eq!(status.resource_templates, Vec::new()); + + mcp_server_handle.abort(); + let _ = mcp_server_handle.await; + + Ok(()) +} + #[tokio::test] async fn mcp_server_status_list_does_not_duplicate_tools_for_sanitized_name_collisions() -> Result<()> { @@ -165,6 +298,7 @@ url = "{underscore_server_url}/mcp" .send_list_mcp_server_status_request(ListMcpServerStatusParams { cursor: None, limit: None, + detail: None, }) .await?; let response = timeout( @@ -215,3 +349,25 @@ async fn start_mcp_server(tool_name: &str) -> Result<(String, JoinHandle<()>)> { Ok((format!("http://{addr}"), handle)) } + +async fn start_slow_inventory_mcp_server(tool_name: &str) -> Result<(String, JoinHandle<()>)> { + let listener = TcpListener::bind("127.0.0.1:0").await?; + let addr = listener.local_addr()?; + let tool_name = Arc::new(tool_name.to_string()); + let mcp_service = StreamableHttpService::new( + move || { + Ok(SlowInventoryServer { + tool_name: Arc::clone(&tool_name), + }) + }, + Arc::new(LocalSessionManager::default()), + StreamableHttpServerConfig::default(), + ); + let router = Router::new().nest_service("/mcp", mcp_service); + + let handle = tokio::spawn(async move { + let _ = axum::serve(listener, router).await; + }); + + Ok((format!("http://{addr}"), handle)) +} diff --git a/codex-rs/app-server/tests/suite/v2/mod.rs b/codex-rs/app-server/tests/suite/v2/mod.rs index b8d5546580..6cd67daa58 100644 --- a/codex-rs/app-server/tests/suite/v2/mod.rs +++ b/codex-rs/app-server/tests/suite/v2/mod.rs @@ -14,6 +14,7 @@ mod experimental_api; mod experimental_feature_list; mod fs; mod initialize; +mod mcp_resource; mod mcp_server_elicitation; mod mcp_server_status; mod model_list; diff --git a/codex-rs/app-server/tests/suite/v2/plugin_install.rs 
b/codex-rs/app-server/tests/suite/v2/plugin_install.rs index 95630079cc..a3bea53172 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_install.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_install.rs @@ -25,7 +25,7 @@ use codex_app_server_protocol::PluginAuthPolicy; use codex_app_server_protocol::PluginInstallParams; use codex_app_server_protocol::PluginInstallResponse; use codex_app_server_protocol::RequestId; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; diff --git a/codex-rs/app-server/tests/suite/v2/plugin_list.rs b/codex-rs/app-server/tests/suite/v2/plugin_list.rs index d1ee1aa203..8bc4a8598e 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_list.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_list.rs @@ -15,8 +15,8 @@ use codex_app_server_protocol::PluginMarketplaceEntry; use codex_app_server_protocol::PluginSource; use codex_app_server_protocol::PluginSummary; use codex_app_server_protocol::RequestId; +use codex_config::types::AuthCredentialsStoreMode; use codex_core::config::set_project_trust_level; -use codex_login::AuthCredentialsStoreMode; use codex_protocol::config_types::TrustLevel; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; diff --git a/codex-rs/app-server/tests/suite/v2/plugin_read.rs b/codex-rs/app-server/tests/suite/v2/plugin_read.rs index 32e799468e..115a04f0f5 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_read.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_read.rs @@ -23,7 +23,7 @@ use codex_app_server_protocol::PluginInstallPolicy; use codex_app_server_protocol::PluginReadParams; use codex_app_server_protocol::PluginReadResponse; use codex_app_server_protocol::RequestId; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use 
codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use rmcp::handler::server::ServerHandler; diff --git a/codex-rs/app-server/tests/suite/v2/plugin_uninstall.rs b/codex-rs/app-server/tests/suite/v2/plugin_uninstall.rs index d0122c62f9..00fabe4832 100644 --- a/codex-rs/app-server/tests/suite/v2/plugin_uninstall.rs +++ b/codex-rs/app-server/tests/suite/v2/plugin_uninstall.rs @@ -11,7 +11,7 @@ use codex_app_server_protocol::JSONRPCResponse; use codex_app_server_protocol::PluginUninstallParams; use codex_app_server_protocol::PluginUninstallResponse; use codex_app_server_protocol::RequestId; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use pretty_assertions::assert_eq; use serde_json::json; use tempfile::TempDir; diff --git a/codex-rs/app-server/tests/suite/v2/rate_limits.rs b/codex-rs/app-server/tests/suite/v2/rate_limits.rs index 64c155081b..203d664940 100644 --- a/codex-rs/app-server/tests/suite/v2/rate_limits.rs +++ b/codex-rs/app-server/tests/suite/v2/rate_limits.rs @@ -10,7 +10,7 @@ use codex_app_server_protocol::LoginAccountResponse; use codex_app_server_protocol::RateLimitSnapshot; use codex_app_server_protocol::RateLimitWindow; use codex_app_server_protocol::RequestId; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_protocol::account::PlanType as AccountPlanType; use pretty_assertions::assert_eq; use serde_json::json; diff --git a/codex-rs/app-server/tests/suite/v2/thread_fork.rs b/codex-rs/app-server/tests/suite/v2/thread_fork.rs index 0849fe9b32..1137f09848 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_fork.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_fork.rs @@ -24,8 +24,10 @@ use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput; -use codex_login::AuthCredentialsStoreMode; +use 
codex_config::types::AuthCredentialsStoreMode; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; +use core_test_support::responses; +use core_test_support::skip_if_no_network; use pretty_assertions::assert_eq; use serde_json::Value; use serde_json::json; @@ -183,6 +185,170 @@ async fn thread_fork_creates_new_thread_and_emits_started() -> Result<()> { Ok(()) } +#[tokio::test] +async fn thread_fork_honors_explicit_null_thread_instructions() -> Result<()> { + skip_if_no_network!(Ok(())); + + let server = responses::start_mock_server().await; + let body = responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_assistant_message("msg-1", "Done"), + responses::ev_completed("resp-1"), + ]); + let response_mock = + responses::mount_sse_sequence(&server, vec![body.clone(), body.clone(), body]).await; + + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri())?; + let config_path = codex_home.path().join("config.toml"); + let mut config_toml = std::fs::read_to_string(&config_path)?; + let first_table_index = config_toml + .find("\n[") + .expect("test config must include a table header"); + config_toml.insert_str( + first_table_index, + "\ndeveloper_instructions = \"Config developer instructions sentinel\"\n", + ); + std::fs::write(config_path, config_toml)?; + + let conversation_id = create_fake_rollout( + codex_home.path(), + "2025-01-05T12-00-00", + "2025-01-05T12:00:00Z", + "Saved user message", + Some("mock_provider"), + /*git_info*/ None, + )?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let disabled_instruction_config = json!({ + "include_permissions_instructions": false, + "include_apps_instructions": false, + "include_environment_context": false, + "features.apps": false, + "features.plugins": false, + "features.codex_hooks": false, + "skills.bundled.enabled": false, + }); + + let fork_params = [ + ( + json!({ + "threadId": 
conversation_id.clone(), + "config": disabled_instruction_config.clone(), + }), + /*expect_instructions*/ true, + ), + ( + json!({ + "threadId": conversation_id.clone(), + "config": disabled_instruction_config.clone(), + "baseInstructions": null, + "developerInstructions": null, + }), + /*expect_instructions*/ false, + ), + ]; + + let mut forked_thread_ids = Vec::new(); + for (params, _expect_instructions) in fork_params { + let fork_id = mcp.send_raw_request("thread/fork", Some(params)).await?; + let fork_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(fork_id)), + ) + .await??; + let ThreadForkResponse { thread, .. } = to_response::(fork_resp)?; + forked_thread_ids.push(thread.id.clone()); + + let turn_id = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id, + input: vec![UserInput::Text { + text: "continue".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + let turn_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_id)), + ) + .await??; + let _: TurnStartResponse = to_response::(turn_resp)?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + } + + let refork_id = mcp + .send_raw_request( + "thread/fork", + Some(json!({ + "threadId": forked_thread_ids[1].clone(), + "config": disabled_instruction_config.clone(), + })), + ) + .await?; + let refork_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(refork_id)), + ) + .await??; + let ThreadForkResponse { thread, .. 
} = to_response::(refork_resp)?; + let turn_id = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id, + input: vec![UserInput::Text { + text: "continue again".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + let turn_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_id)), + ) + .await??; + let _: TurnStartResponse = to_response::(turn_resp)?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + + let requests = response_mock.requests(); + assert_eq!(requests.len(), 3); + for (request, expect_instructions) in requests.into_iter().zip([true, false, false]) { + let payload = request.body_json(); + assert_eq!( + payload.get("instructions").is_some(), + expect_instructions, + "unexpected instructions field in payload: {payload:?}" + ); + let developer_texts = request.message_input_texts("developer"); + assert_eq!( + developer_texts + .iter() + .any(|text| { text.contains("Config developer instructions sentinel") }), + expect_instructions, + "unexpected config developer instruction presence: {developer_texts:?}" + ); + assert!( + developer_texts.iter().all(|text| !text.is_empty()), + "did not expect empty developer instruction messages: {developer_texts:?}" + ); + } + + Ok(()) +} + #[tokio::test] async fn thread_fork_tracks_thread_initialized_analytics() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; diff --git a/codex-rs/app-server/tests/suite/v2/thread_metadata_update.rs b/codex-rs/app-server/tests/suite/v2/thread_metadata_update.rs index f5c793f744..8bf9a8a9aa 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_metadata_update.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_metadata_update.rs @@ -223,59 +223,6 @@ async fn thread_metadata_update_repairs_missing_sqlite_row_for_stored_thread() - Ok(()) } -#[tokio::test] -async 
fn thread_metadata_update_repairs_stored_thread_before_backfill_completes() -> Result<()> { - let server = create_mock_responses_server_repeating_assistant("Done").await; - let codex_home = TempDir::new()?; - create_config_toml(codex_home.path(), &server.uri())?; - let _state_db = - StateRuntime::init(codex_home.path().to_path_buf(), "mock_provider".into()).await?; - - let preview = "Stored thread preview before backfill"; - let thread_id = create_fake_rollout( - codex_home.path(), - "2025-01-05T12-30-00", - "2025-01-05T12:30:00Z", - preview, - Some("mock_provider"), - /*git_info*/ None, - )?; - - let mut mcp = McpProcess::new(codex_home.path()).await?; - timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; - - let update_id = mcp - .send_thread_metadata_update_request(ThreadMetadataUpdateParams { - thread_id: thread_id.clone(), - git_info: Some(ThreadMetadataGitInfoUpdateParams { - sha: None, - branch: Some(Some("feature/pending-backfill".to_string())), - origin_url: None, - }), - }) - .await?; - let update_resp: JSONRPCResponse = timeout( - DEFAULT_READ_TIMEOUT, - mcp.read_stream_until_response_message(RequestId::Integer(update_id)), - ) - .await??; - let ThreadMetadataUpdateResponse { thread: updated } = - to_response::(update_resp)?; - - assert_eq!(updated.id, thread_id); - assert_eq!(updated.preview, preview); - assert_eq!( - updated.git_info, - Some(GitInfo { - sha: None, - branch: Some("feature/pending-backfill".to_string()), - origin_url: None, - }) - ); - - Ok(()) -} - #[tokio::test] async fn thread_metadata_update_repairs_loaded_thread_without_resetting_summary() -> Result<()> { let server = create_mock_responses_server_repeating_assistant("Done").await; diff --git a/codex-rs/app-server/tests/suite/v2/thread_resume.rs b/codex-rs/app-server/tests/suite/v2/thread_resume.rs index 83ed147e12..4c92b61c25 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_resume.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_resume.rs @@ -38,7 +38,7 @@ use 
codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; use codex_protocol::ThreadId; use codex_protocol::config_types::Personality; @@ -377,6 +377,7 @@ stream_max_retries = 0 agent_role: None, model_provider: Some("mock_provider".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; @@ -492,6 +493,7 @@ async fn thread_resume_and_read_interrupt_incomplete_rollout_turn_when_thread_is "type": "event_msg", "payload": serde_json::to_value(EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_id.to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), }))?, diff --git a/codex-rs/app-server/tests/suite/v2/thread_start.rs b/codex-rs/app-server/tests/suite/v2/thread_start.rs index 7907e621b8..50c373ac93 100644 --- a/codex-rs/app-server/tests/suite/v2/thread_start.rs +++ b/codex-rs/app-server/tests/suite/v2/thread_start.rs @@ -18,9 +18,9 @@ use codex_app_server_protocol::ThreadStartResponse; use codex_app_server_protocol::ThreadStartedNotification; use codex_app_server_protocol::ThreadStatus; use codex_app_server_protocol::ThreadStatusChangedNotification; +use codex_config::types::AuthCredentialsStoreMode; use codex_core::config::set_project_trust_level; use codex_git_utils::resolve_root_git_project_for_trust; -use codex_login::AuthCredentialsStoreMode; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; use codex_protocol::config_types::ServiceTier; use codex_protocol::config_types::TrustLevel; diff --git a/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs b/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs index 8f944dca07..2850c7b74f 100644 --- 
a/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_interrupt.rs @@ -50,7 +50,7 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { "call_sleep", )?]) .await; - create_config_toml(&codex_home, &server.uri(), "never")?; + create_config_toml(&codex_home, &server.uri(), "never", "danger-full-access")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; @@ -124,14 +124,11 @@ async fn turn_interrupt_aborts_running_turn() -> Result<()> { #[tokio::test] async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<()> { - #[cfg(target_os = "windows")] let shell_command = vec![ - "powershell".to_string(), - "-Command".to_string(), - "Start-Sleep -Seconds 10".to_string(), + "python3".to_string(), + "-c".to_string(), + "print(42)".to_string(), ]; - #[cfg(not(target_os = "windows"))] - let shell_command = vec!["sleep".to_string(), "10".to_string()]; let tmp = TempDir::new()?; let codex_home = tmp.path().join("codex_home"); @@ -143,10 +140,10 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() shell_command.clone(), Some(&working_directory), Some(10_000), - "call_sleep_approval", + "call_python_approval", )?]) .await; - create_config_toml(&codex_home, &server.uri(), "untrusted")?; + create_config_toml(&codex_home, &server.uri(), "untrusted", "read-only")?; let mut mcp = McpProcess::new(&codex_home).await?; timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; @@ -168,7 +165,7 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() .send_turn_start_request(TurnStartParams { thread_id: thread.id.clone(), input: vec![V2UserInput::Text { - text: "run sleep".to_string(), + text: "run python".to_string(), text_elements: Vec::new(), }], cwd: Some(working_directory), @@ -190,7 +187,7 @@ async fn turn_interrupt_resolves_pending_command_approval_request() -> Result<() let 
ServerRequest::CommandExecutionRequestApproval { request_id, params } = request else { panic!("expected CommandExecutionRequestApproval request"); }; - assert_eq!(params.item_id, "call_sleep_approval"); + assert_eq!(params.item_id, "call_python_approval"); assert_eq!(params.thread_id, thread.id); assert_eq!(params.turn_id, turn.id); @@ -242,6 +239,7 @@ fn create_config_toml( codex_home: &std::path::Path, server_uri: &str, approval_policy: &str, + sandbox_mode: &str, ) -> std::io::Result<()> { let config_toml = codex_home.join("config.toml"); std::fs::write( @@ -250,7 +248,7 @@ fn create_config_toml( r#" model = "mock-model" approval_policy = "{approval_policy}" -sandbox_mode = "danger-full-access" +sandbox_mode = "{sandbox_mode}" model_provider = "mock_provider" diff --git a/codex-rs/app-server/tests/suite/v2/turn_start.rs b/codex-rs/app-server/tests/suite/v2/turn_start.rs index b99d1cb73e..70c388a19d 100644 --- a/codex-rs/app-server/tests/suite/v2/turn_start.rs +++ b/codex-rs/app-server/tests/suite/v2/turn_start.rs @@ -43,7 +43,7 @@ use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnStartedNotification; use codex_app_server_protocol::TurnStatus; use codex_app_server_protocol::UserInput as V2UserInput; -use codex_core::config::ConfigToml; +use codex_config::config_toml::ConfigToml; use codex_core::personality_migration::PERSONALITY_MIGRATION_FILENAME; use codex_features::FEATURES; use codex_features::Feature; @@ -152,6 +152,120 @@ async fn turn_start_sends_originator_header() -> Result<()> { Ok(()) } +#[tokio::test] +async fn turn_start_honors_explicit_null_thread_instructions() -> Result<()> { + skip_if_no_network!(Ok(())); + + let server = responses::start_mock_server().await; + let body = responses::sse(vec![ + responses::ev_response_created("resp-1"), + responses::ev_assistant_message("msg-1", "Done"), + responses::ev_completed("resp-1"), + ]); + let response_mock = responses::mount_sse_sequence(&server, vec![body.clone(), 
body]).await; + + let codex_home = TempDir::new()?; + create_config_toml(codex_home.path(), &server.uri(), "never", &BTreeMap::new())?; + let config_path = codex_home.path().join("config.toml"); + let mut config_toml = std::fs::read_to_string(&config_path)?; + let first_table_index = config_toml + .find("\n[") + .expect("test config must include a table header"); + config_toml.insert_str( + first_table_index, + "\ndeveloper_instructions = \"Config developer instructions sentinel\"\n", + ); + std::fs::write(config_path, config_toml)?; + + let mut mcp = McpProcess::new(codex_home.path()).await?; + timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??; + + let disabled_instruction_config = json!({ + "include_permissions_instructions": false, + "include_apps_instructions": false, + "include_environment_context": false, + "features.apps": false, + "features.plugins": false, + "features.codex_hooks": false, + "skills.bundled.enabled": false, + }); + + let thread_start_params = [ + ( + json!({ + "model": "mock-model", + "config": disabled_instruction_config.clone(), + }), + /*expect_instructions*/ true, + ), + ( + json!({ + "model": "mock-model", + "config": disabled_instruction_config.clone(), + "baseInstructions": null, + "developerInstructions": null, + }), + /*expect_instructions*/ false, + ), + ]; + + for (params, _expect_instructions) in thread_start_params { + let thread_req = mcp.send_raw_request("thread/start", Some(params)).await?; + let thread_resp: JSONRPCResponse = timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(thread_req)), + ) + .await??; + let ThreadStartResponse { thread, .. 
} = to_response::(thread_resp)?; + + let turn_req = mcp + .send_turn_start_request(TurnStartParams { + thread_id: thread.id, + input: vec![V2UserInput::Text { + text: "Hello".to_string(), + text_elements: Vec::new(), + }], + ..Default::default() + }) + .await?; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_response_message(RequestId::Integer(turn_req)), + ) + .await??; + timeout( + DEFAULT_READ_TIMEOUT, + mcp.read_stream_until_notification_message("turn/completed"), + ) + .await??; + } + + let requests = response_mock.requests(); + assert_eq!(requests.len(), 2); + for (request, expect_instructions) in requests.into_iter().zip([true, false]) { + let payload = request.body_json(); + assert_eq!( + payload.get("instructions").is_some(), + expect_instructions, + "unexpected instructions field in payload: {payload:?}" + ); + let developer_texts = request.message_input_texts("developer"); + assert_eq!( + developer_texts + .iter() + .any(|text| { text.contains("Config developer instructions sentinel") }), + expect_instructions, + "unexpected config developer instruction presence: {developer_texts:?}" + ); + assert!( + developer_texts.iter().all(|text| !text.is_empty()), + "did not expect empty developer instruction messages: {developer_texts:?}" + ); + } + + Ok(()) +} + #[tokio::test] async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> { let responses = vec![create_final_assistant_message_sse_response("Done")?]; diff --git a/codex-rs/apply-patch/Cargo.toml b/codex-rs/apply-patch/Cargo.toml index 2fad46c2af..ba4fa5e48e 100644 --- a/codex-rs/apply-patch/Cargo.toml +++ b/codex-rs/apply-patch/Cargo.toml @@ -17,8 +17,11 @@ workspace = true [dependencies] anyhow = { workspace = true } +codex-exec-server = { workspace = true } +codex-utils-absolute-path = { workspace = true } similar = { workspace = true } thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt"] } tree-sitter = { workspace = true } 
tree-sitter-bash = { workspace = true } diff --git a/codex-rs/apply-patch/src/invocation.rs b/codex-rs/apply-patch/src/invocation.rs index 519671798d..3b0db2fa9c 100644 --- a/codex-rs/apply-patch/src/invocation.rs +++ b/codex-rs/apply-patch/src/invocation.rs @@ -2,6 +2,8 @@ use std::collections::HashMap; use std::path::Path; use std::sync::LazyLock; +use codex_exec_server::ExecutorFileSystem; +use codex_utils_absolute_path::AbsolutePathBuf; use tree_sitter::Parser; use tree_sitter::Query; use tree_sitter::QueryCursor; @@ -129,7 +131,11 @@ pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch { /// cwd must be an absolute path so that we can resolve relative paths in the /// patch. -pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified { +pub async fn maybe_parse_apply_patch_verified( + argv: &[String], + cwd: &AbsolutePathBuf, + fs: &dyn ExecutorFileSystem, +) -> MaybeApplyPatchVerified { // Detect a raw patch body passed directly as the command or as the body of a shell // script. In these cases, report an explicit error rather than applying the patch. if let [body] = argv @@ -151,24 +157,20 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp }) => { let effective_cwd = workdir .as_ref() - .map(|dir| { - let path = Path::new(dir); - if path.is_absolute() { - path.to_path_buf() - } else { - cwd.join(path) - } - }) - .unwrap_or_else(|| cwd.to_path_buf()); + .map(|dir| cwd.join(Path::new(dir))) + .unwrap_or_else(|| cwd.clone()); let mut changes = HashMap::new(); for hunk in hunks { let path = hunk.resolve_path(&effective_cwd); match hunk { Hunk::AddFile { contents, .. } => { - changes.insert(path, ApplyPatchFileChange::Add { content: contents }); + changes.insert( + path.into_path_buf(), + ApplyPatchFileChange::Add { content: contents }, + ); } Hunk::DeleteFile { .. 
} => { - let content = match std::fs::read_to_string(&path) { + let content = match fs.read_file_text(&path).await { Ok(content) => content, Err(e) => { return MaybeApplyPatchVerified::CorrectnessError( @@ -179,7 +181,10 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp ); } }; - changes.insert(path, ApplyPatchFileChange::Delete { content }); + changes.insert( + path.into_path_buf(), + ApplyPatchFileChange::Delete { content }, + ); } Hunk::UpdateFile { move_path, chunks, .. @@ -187,17 +192,17 @@ pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApp let ApplyPatchFileUpdate { unified_diff, content: contents, - } = match unified_diff_from_chunks(&path, &chunks) { + } = match unified_diff_from_chunks(&path, &chunks, fs).await { Ok(diff) => diff, Err(e) => { return MaybeApplyPatchVerified::CorrectnessError(e); } }; changes.insert( - path, + path.into_path_buf(), ApplyPatchFileChange::Update { unified_diff, - move_path: move_path.map(|p| effective_cwd.join(p)), + move_path: move_path.map(|p| effective_cwd.join(p).into_path_buf()), new_content: contents, }, ); @@ -371,7 +376,10 @@ fn extract_apply_patch_from_bash( #[cfg(test)] mod tests { use super::*; + use crate::unified_diff_from_chunks; use assert_matches::assert_matches; + use codex_exec_server::LOCAL_FS; + use codex_utils_absolute_path::test_support::PathExt; use pretty_assertions::assert_eq; use std::fs; use std::path::PathBuf; @@ -450,30 +458,40 @@ mod tests { ); } - #[test] - fn test_implicit_patch_single_arg_is_error() { + #[tokio::test] + async fn test_implicit_patch_single_arg_is_error() { let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string(); let args = vec![patch]; let dir = tempdir().unwrap(); assert_matches!( - maybe_parse_apply_patch_verified(&args, dir.path()), + maybe_parse_apply_patch_verified( + &args, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + LOCAL_FS.as_ref() + ) + .await, 
MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) ); } - #[test] - fn test_implicit_patch_bash_script_is_error() { + #[tokio::test] + async fn test_implicit_patch_bash_script_is_error() { let script = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch"; let args = args_bash(script); let dir = tempdir().unwrap(); assert_matches!( - maybe_parse_apply_patch_verified(&args, dir.path()), + maybe_parse_apply_patch_verified( + &args, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + LOCAL_FS.as_ref() + ) + .await, MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) ); } - #[test] - fn test_literal() { + #[tokio::test] + async fn test_literal() { let args = strs_to_strings(&[ "apply_patch", r#"*** Begin Patch @@ -497,8 +515,8 @@ mod tests { } } - #[test] - fn test_literal_applypatch() { + #[tokio::test] + async fn test_literal_applypatch() { let args = strs_to_strings(&[ "applypatch", r#"*** Begin Patch @@ -522,20 +540,20 @@ mod tests { } } - #[test] - fn test_heredoc() { + #[tokio::test] + async fn test_heredoc() { assert_match(&heredoc_script(""), /*expected_workdir*/ None); } - #[test] - fn test_heredoc_non_login_shell() { + #[tokio::test] + async fn test_heredoc_non_login_shell() { let script = heredoc_script(""); let args = strs_to_strings(&["bash", "-c", &script]); assert_match_args(args, /*expected_workdir*/ None); } - #[test] - fn test_heredoc_applypatch() { + #[tokio::test] + async fn test_heredoc_applypatch() { let args = strs_to_strings(&[ "bash", "-lc", @@ -562,96 +580,96 @@ PATCH"#, } } - #[test] - fn test_powershell_heredoc() { + #[tokio::test] + async fn test_powershell_heredoc() { let script = heredoc_script(""); assert_match_args(args_powershell(&script), /*expected_workdir*/ None); } - #[test] - fn test_powershell_heredoc_no_profile() { + #[tokio::test] + async fn test_powershell_heredoc_no_profile() { let script = heredoc_script(""); assert_match_args( 
args_powershell_no_profile(&script), /*expected_workdir*/ None, ); } - #[test] - fn test_pwsh_heredoc() { + #[tokio::test] + async fn test_pwsh_heredoc() { let script = heredoc_script(""); assert_match_args(args_pwsh(&script), /*expected_workdir*/ None); } - #[test] - fn test_cmd_heredoc_with_cd() { + #[tokio::test] + async fn test_cmd_heredoc_with_cd() { let script = heredoc_script("cd foo && "); assert_match_args(args_cmd(&script), Some("foo")); } - #[test] - fn test_heredoc_with_leading_cd() { + #[tokio::test] + async fn test_heredoc_with_leading_cd() { assert_match(&heredoc_script("cd foo && "), Some("foo")); } - #[test] - fn test_cd_with_semicolon_is_ignored() { + #[tokio::test] + async fn test_cd_with_semicolon_is_ignored() { assert_not_match(&heredoc_script("cd foo; ")); } - #[test] - fn test_cd_or_apply_patch_is_ignored() { + #[tokio::test] + async fn test_cd_or_apply_patch_is_ignored() { assert_not_match(&heredoc_script("cd bar || ")); } - #[test] - fn test_cd_pipe_apply_patch_is_ignored() { + #[tokio::test] + async fn test_cd_pipe_apply_patch_is_ignored() { assert_not_match(&heredoc_script("cd bar | ")); } - #[test] - fn test_cd_single_quoted_path_with_spaces() { + #[tokio::test] + async fn test_cd_single_quoted_path_with_spaces() { assert_match(&heredoc_script("cd 'foo bar' && "), Some("foo bar")); } - #[test] - fn test_cd_double_quoted_path_with_spaces() { + #[tokio::test] + async fn test_cd_double_quoted_path_with_spaces() { assert_match(&heredoc_script("cd \"foo bar\" && "), Some("foo bar")); } - #[test] - fn test_echo_and_apply_patch_is_ignored() { + #[tokio::test] + async fn test_echo_and_apply_patch_is_ignored() { assert_not_match(&heredoc_script("echo foo && ")); } - #[test] - fn test_apply_patch_with_arg_is_ignored() { + #[tokio::test] + async fn test_apply_patch_with_arg_is_ignored() { let script = "apply_patch foo <<'PATCH'\n*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\nPATCH"; assert_not_match(script); } - #[test] - fn 
test_double_cd_then_apply_patch_is_ignored() { + #[tokio::test] + async fn test_double_cd_then_apply_patch_is_ignored() { assert_not_match(&heredoc_script("cd foo && cd bar && ")); } - #[test] - fn test_cd_two_args_is_ignored() { + #[tokio::test] + async fn test_cd_two_args_is_ignored() { assert_not_match(&heredoc_script("cd foo bar && ")); } - #[test] - fn test_cd_then_apply_patch_then_extra_is_ignored() { + #[tokio::test] + async fn test_cd_then_apply_patch_then_extra_is_ignored() { let script = heredoc_script_ps("cd bar && ", " && echo done"); assert_not_match(&script); } - #[test] - fn test_echo_then_cd_and_apply_patch_is_ignored() { + #[tokio::test] + async fn test_echo_then_cd_and_apply_patch_is_ignored() { // Ensure preceding commands before the `cd && apply_patch <<...` sequence do not match. assert_not_match(&heredoc_script("echo foo; cd bar && ")); } - #[test] - fn test_unified_diff_last_line_replacement() { + #[tokio::test] + async fn test_unified_diff_last_line_replacement() { // Replace the very last line of the file. let dir = tempdir().unwrap(); let path = dir.path().join("last.txt"); @@ -674,7 +692,10 @@ PATCH"#, _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -2,2 +2,2 @@ bar -baz @@ -687,8 +708,8 @@ PATCH"#, assert_eq!(expected, diff); } - #[test] - fn test_unified_diff_insert_at_eof() { + #[tokio::test] + async fn test_unified_diff_insert_at_eof() { // Insert a new line at end‑of‑file. 
let dir = tempdir().unwrap(); let path = dir.path().join("insert.txt"); @@ -709,7 +730,10 @@ PATCH"#, _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -3 +3,2 @@ baz +quux @@ -721,8 +745,8 @@ PATCH"#, assert_eq!(expected, diff); } - #[test] - fn test_apply_patch_should_resolve_absolute_paths_in_cwd() { + #[tokio::test] + async fn test_apply_patch_should_resolve_absolute_paths_in_cwd() { let session_dir = tempdir().unwrap(); let relative_path = "source.txt"; @@ -742,7 +766,12 @@ PATCH"#, .to_string(), ]; - let result = maybe_parse_apply_patch_verified(&argv, session_dir.path()); + let result = maybe_parse_apply_patch_verified( + &argv, + &AbsolutePathBuf::from_absolute_path(session_dir.path()).unwrap(), + LOCAL_FS.as_ref(), + ) + .await; // Verify the patch contents - as otherwise we may have pulled contents // from the wrong file (as we're using relative paths) @@ -762,13 +791,13 @@ PATCH"#, }, )]), patch: argv[1].clone(), - cwd: session_dir.path().to_path_buf(), + cwd: AbsolutePathBuf::from_absolute_path(session_dir.path()).unwrap(), }) ); } - #[test] - fn test_apply_patch_resolves_move_path_with_effective_cwd() { + #[tokio::test] + async fn test_apply_patch_resolves_move_path_with_effective_cwd() { let session_dir = tempdir().unwrap(); let worktree_rel = "alt"; let worktree_dir = session_dir.path().join(worktree_rel); @@ -790,13 +819,18 @@ PATCH"#, let shell_script = format!("cd {worktree_rel} && apply_patch <<'PATCH'\n{patch}\nPATCH"); let argv = vec!["bash".into(), "-lc".into(), shell_script]; - let result = maybe_parse_apply_patch_verified(&argv, session_dir.path()); + let result = maybe_parse_apply_patch_verified( + &argv, + &AbsolutePathBuf::from_absolute_path(session_dir.path()).unwrap(), + LOCAL_FS.as_ref(), + ) + .await; let 
action = match result { MaybeApplyPatchVerified::Body(action) => action, other => panic!("expected verified body, got {other:?}"), }; - assert_eq!(action.cwd, worktree_dir); + assert_eq!(action.cwd.as_path(), worktree_dir.as_path()); let change = action .changes() diff --git a/codex-rs/apply-patch/src/lib.rs b/codex-rs/apply-patch/src/lib.rs index 3041c4b2cd..6b6aba2df3 100644 --- a/codex-rs/apply-patch/src/lib.rs +++ b/codex-rs/apply-patch/src/lib.rs @@ -4,11 +4,16 @@ mod seek_sequence; mod standalone_executable; use std::collections::HashMap; +use std::io; use std::path::Path; use std::path::PathBuf; use anyhow::Context; use anyhow::Result; +use codex_exec_server::CreateDirectoryOptions; +use codex_exec_server::ExecutorFileSystem; +use codex_exec_server::RemoveOptions; +use codex_utils_absolute_path::AbsolutePathBuf; pub use parser::Hunk; pub use parser::ParseError; use parser::ParseError::*; @@ -134,7 +139,7 @@ pub struct ApplyPatchAction { pub patch: String, /// The working directory that was used to resolve relative paths in the patch. - pub cwd: PathBuf, + pub cwd: AbsolutePathBuf, } impl ApplyPatchAction { @@ -149,11 +154,7 @@ impl ApplyPatchAction { /// Should be used exclusively for testing. (Not worth the overhead of /// creating a feature flag for this.) - pub fn new_add_for_test(path: &Path, content: String) -> Self { - if !path.is_absolute() { - panic!("path must be absolute"); - } - + pub fn new_add_for_test(path: &AbsolutePathBuf, content: String) -> Self { #[expect(clippy::expect_used)] let filename = path .file_name() @@ -170,20 +171,19 @@ impl ApplyPatchAction { #[expect(clippy::expect_used)] Self { changes, - cwd: path - .parent() - .expect("path should have parent") - .to_path_buf(), + cwd: path.parent().expect("path should have parent"), patch, } } } /// Applies the patch and prints the result to stdout/stderr. 
-pub fn apply_patch( +pub async fn apply_patch( patch: &str, + cwd: &AbsolutePathBuf, stdout: &mut impl std::io::Write, stderr: &mut impl std::io::Write, + fs: &dyn ExecutorFileSystem, ) -> Result<(), ApplyPatchError> { let hunks = match parse_patch(patch) { Ok(source) => source.hunks, @@ -207,45 +207,21 @@ pub fn apply_patch( } }; - apply_hunks(&hunks, stdout, stderr)?; + apply_hunks(&hunks, cwd, stdout, stderr, fs).await?; Ok(()) } /// Applies hunks and continues to update stdout/stderr -pub fn apply_hunks( +pub async fn apply_hunks( hunks: &[Hunk], + cwd: &AbsolutePathBuf, stdout: &mut impl std::io::Write, stderr: &mut impl std::io::Write, + fs: &dyn ExecutorFileSystem, ) -> Result<(), ApplyPatchError> { - let _existing_paths: Vec<&Path> = hunks - .iter() - .filter_map(|hunk| match hunk { - Hunk::AddFile { .. } => { - // The file is being added, so it doesn't exist yet. - None - } - Hunk::DeleteFile { path } => Some(path.as_path()), - Hunk::UpdateFile { - path, move_path, .. - } => match move_path { - Some(move_path) => { - if std::fs::metadata(move_path) - .map(|m| m.is_file()) - .unwrap_or(false) - { - Some(move_path.as_path()) - } else { - None - } - } - None => Some(path.as_path()), - }, - }) - .collect::>(); - // Delegate to a helper that applies each hunk to the filesystem. - match apply_hunks_to_files(hunks) { + match apply_hunks_to_files(hunks, cwd, fs).await { Ok(affected) => { print_summary(&affected, stdout).map_err(ApplyPatchError::from)?; Ok(()) @@ -267,7 +243,8 @@ pub fn apply_hunks( /// Applies each parsed patch hunk to the filesystem. /// Returns an error if any of the changes could not be applied. -/// Tracks file paths affected by applying a patch. +/// Tracks file paths affected by applying a patch, preserving the path spelling +/// from the patch for user-facing summaries. 
pub struct AffectedPaths { pub added: Vec, pub modified: Vec, @@ -276,7 +253,11 @@ pub struct AffectedPaths { /// Apply the hunks to the filesystem, returning which files were added, modified, or deleted. /// Returns an error if the patch could not be applied. -fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result { +async fn apply_hunks_to_files( + hunks: &[Hunk], + cwd: &AbsolutePathBuf, + fs: &dyn ExecutorFileSystem, +) -> anyhow::Result { if hunks.is_empty() { anyhow::bail!("No files were modified."); } @@ -285,48 +266,97 @@ fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result { let mut modified: Vec = Vec::new(); let mut deleted: Vec = Vec::new(); for hunk in hunks { + let affected_path = hunk.path().to_path_buf(); + let path_abs = hunk.resolve_path(cwd); match hunk { - Hunk::AddFile { path, contents } => { - if let Some(parent) = path.parent() - && !parent.as_os_str().is_empty() - { - std::fs::create_dir_all(parent).with_context(|| { - format!("Failed to create parent directories for {}", path.display()) - })?; + Hunk::AddFile { contents, .. } => { + if let Some(parent_abs) = path_abs.parent() { + fs.create_directory(&parent_abs, CreateDirectoryOptions { recursive: true }) + .await + .with_context(|| { + format!( + "Failed to create parent directories for {}", + path_abs.display() + ) + })?; } - std::fs::write(path, contents) - .with_context(|| format!("Failed to write file {}", path.display()))?; - added.push(path.clone()); + fs.write_file(&path_abs, contents.clone().into_bytes()) + .await + .with_context(|| format!("Failed to write file {}", path_abs.display()))?; + added.push(affected_path); } - Hunk::DeleteFile { path } => { - std::fs::remove_file(path) - .with_context(|| format!("Failed to delete file {}", path.display()))?; - deleted.push(path.clone()); + Hunk::DeleteFile { .. 
} => { + let result: io::Result<()> = async { + let metadata = fs.get_metadata(&path_abs).await?; + if metadata.is_directory { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path is a directory", + )); + } + fs.remove( + &path_abs, + RemoveOptions { + recursive: false, + force: false, + }, + ) + .await + } + .await; + result.with_context(|| format!("Failed to delete file {}", path_abs.display()))?; + deleted.push(affected_path); } Hunk::UpdateFile { - path, - move_path, - chunks, + move_path, chunks, .. } => { let AppliedPatch { new_contents, .. } = - derive_new_contents_from_chunks(path, chunks)?; + derive_new_contents_from_chunks(&path_abs, chunks, fs).await?; if let Some(dest) = move_path { - if let Some(parent) = dest.parent() - && !parent.as_os_str().is_empty() - { - std::fs::create_dir_all(parent).with_context(|| { - format!("Failed to create parent directories for {}", dest.display()) + let dest_abs = AbsolutePathBuf::resolve_path_against_base(dest, cwd); + if let Some(parent_abs) = dest_abs.parent() { + fs.create_directory( + &parent_abs, + CreateDirectoryOptions { recursive: true }, + ) + .await + .with_context(|| { + format!( + "Failed to create parent directories for {}", + dest_abs.display() + ) })?; } - std::fs::write(dest, new_contents) - .with_context(|| format!("Failed to write file {}", dest.display()))?; - std::fs::remove_file(path) - .with_context(|| format!("Failed to remove original {}", path.display()))?; - modified.push(dest.clone()); + fs.write_file(&dest_abs, new_contents.into_bytes()) + .await + .with_context(|| format!("Failed to write file {}", dest_abs.display()))?; + let result: io::Result<()> = async { + let metadata = fs.get_metadata(&path_abs).await?; + if metadata.is_directory { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "path is a directory", + )); + } + fs.remove( + &path_abs, + RemoveOptions { + recursive: false, + force: false, + }, + ) + .await + } + .await; + result.with_context(|| { + 
format!("Failed to remove original {}", path_abs.display()) + })?; + modified.push(affected_path); } else { - std::fs::write(path, new_contents) - .with_context(|| format!("Failed to write file {}", path.display()))?; - modified.push(path.clone()); + fs.write_file(&path_abs, new_contents.into_bytes()) + .await + .with_context(|| format!("Failed to write file {}", path_abs.display()))?; + modified.push(affected_path); } } } @@ -345,19 +375,17 @@ struct AppliedPatch { /// Return *only* the new file contents (joined into a single `String`) after /// applying the chunks to the file at `path`. -fn derive_new_contents_from_chunks( - path: &Path, +async fn derive_new_contents_from_chunks( + path_abs: &AbsolutePathBuf, chunks: &[UpdateFileChunk], + fs: &dyn ExecutorFileSystem, ) -> std::result::Result { - let original_contents = match std::fs::read_to_string(path) { - Ok(contents) => contents, - Err(err) => { - return Err(ApplyPatchError::IoError(IoError { - context: format!("Failed to read file to update {}", path.display()), - source: err, - })); - } - }; + let original_contents = fs.read_file_text(path_abs).await.map_err(|err| { + ApplyPatchError::IoError(IoError { + context: format!("Failed to read file to update {}", path_abs.display()), + source: err, + }) + })?; let mut original_lines: Vec = original_contents.split('\n').map(String::from).collect(); @@ -367,7 +395,7 @@ fn derive_new_contents_from_chunks( original_lines.pop(); } - let replacements = compute_replacements(&original_lines, path, chunks)?; + let replacements = compute_replacements(&original_lines, path_abs.as_path(), chunks)?; let new_lines = apply_replacements(original_lines, &replacements); let mut new_lines = new_lines; if !new_lines.last().is_some_and(String::is_empty) { @@ -508,22 +536,24 @@ pub struct ApplyPatchFileUpdate { content: String, } -pub fn unified_diff_from_chunks( - path: &Path, +pub async fn unified_diff_from_chunks( + path_abs: &AbsolutePathBuf, chunks: &[UpdateFileChunk], + fs: &dyn 
ExecutorFileSystem, ) -> std::result::Result { - unified_diff_from_chunks_with_context(path, chunks, /*context*/ 1) + unified_diff_from_chunks_with_context(path_abs, chunks, /*context*/ 1, fs).await } -pub fn unified_diff_from_chunks_with_context( - path: &Path, +pub async fn unified_diff_from_chunks_with_context( + path_abs: &AbsolutePathBuf, chunks: &[UpdateFileChunk], context: usize, + fs: &dyn ExecutorFileSystem, ) -> std::result::Result { let AppliedPatch { original_contents, new_contents, - } = derive_new_contents_from_chunks(path, chunks)?; + } = derive_new_contents_from_chunks(path_abs, chunks, fs).await?; let text_diff = TextDiff::from_lines(&original_contents, &new_contents); let unified_diff = text_diff.unified_diff().context_radius(context).to_string(); Ok(ApplyPatchFileUpdate { @@ -554,6 +584,8 @@ pub fn print_summary( #[cfg(test)] mod tests { use super::*; + use codex_exec_server::LOCAL_FS; + use codex_utils_absolute_path::test_support::PathExt; use pretty_assertions::assert_eq; use std::fs; use std::string::ToString; @@ -564,8 +596,8 @@ mod tests { format!("*** Begin Patch\n{body}\n*** End Patch") } - #[test] - fn test_add_file_hunk_creates_file_with_contents() { + #[tokio::test] + async fn test_add_file_hunk_creates_file_with_contents() { let dir = tempdir().unwrap(); let path = dir.path().join("add.txt"); let patch = wrap_patch(&format!( @@ -576,7 +608,15 @@ mod tests { )); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); // Verify expected stdout and stderr outputs. 
let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); @@ -590,15 +630,88 @@ mod tests { assert_eq!(contents, "ab\ncd\n"); } - #[test] - fn test_delete_file_hunk_removes_file() { + #[tokio::test] + async fn test_apply_patch_hunks_accept_relative_and_absolute_paths() { + let dir = tempdir().unwrap(); + let cwd = dir.path().abs(); + let relative_add = dir.path().join("relative-add.txt"); + let absolute_add = dir.path().join("absolute-add.txt"); + let relative_delete = dir.path().join("relative-delete.txt"); + let absolute_delete = dir.path().join("absolute-delete.txt"); + let relative_update = dir.path().join("relative-update.txt"); + let absolute_update = dir.path().join("absolute-update.txt"); + fs::write(&relative_delete, "delete relative\n").unwrap(); + fs::write(&absolute_delete, "delete absolute\n").unwrap(); + fs::write(&relative_update, "relative old\n").unwrap(); + fs::write(&absolute_update, "absolute old\n").unwrap(); + + let patch = wrap_patch(&format!( + r#"*** Add File: relative-add.txt ++relative add +*** Add File: {} ++absolute add +*** Delete File: relative-delete.txt +*** Delete File: {} +*** Update File: relative-update.txt +@@ +-relative old ++relative new +*** Update File: {} +@@ +-absolute old ++absolute new"#, + absolute_add.display(), + absolute_delete.display(), + absolute_update.display(), + )); + let mut stdout = Vec::new(); + let mut stderr = Vec::new(); + + apply_patch(&patch, &cwd, &mut stdout, &mut stderr, LOCAL_FS.as_ref()) + .await + .unwrap(); + + assert_eq!(fs::read_to_string(&relative_add).unwrap(), "relative add\n"); + assert_eq!(fs::read_to_string(&absolute_add).unwrap(), "absolute add\n"); + assert!(!relative_delete.exists()); + assert!(!absolute_delete.exists()); + assert_eq!( + fs::read_to_string(&relative_update).unwrap(), + "relative new\n" + ); + assert_eq!( + fs::read_to_string(&absolute_update).unwrap(), + "absolute new\n" + ); + 
assert_eq!(String::from_utf8(stderr).unwrap(), ""); + assert_eq!( + String::from_utf8(stdout).unwrap(), + format!( + "Success. Updated the following files:\nA relative-add.txt\nA {}\nM relative-update.txt\nM {}\nD relative-delete.txt\nD {}\n", + absolute_add.display(), + absolute_update.display(), + absolute_delete.display(), + ) + ); + } + + #[tokio::test] + async fn test_delete_file_hunk_removes_file() { let dir = tempdir().unwrap(); let path = dir.path().join("del.txt"); fs::write(&path, "x").unwrap(); let patch = wrap_patch(&format!("*** Delete File: {}", path.display())); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); let expected_out = format!( @@ -610,8 +723,8 @@ mod tests { assert!(!path.exists()); } - #[test] - fn test_update_file_hunk_modifies_content() { + #[tokio::test] + async fn test_update_file_hunk_modifies_content() { let dir = tempdir().unwrap(); let path = dir.path().join("update.txt"); fs::write(&path, "foo\nbar\n").unwrap(); @@ -625,7 +738,15 @@ mod tests { )); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); // Validate modified file contents and expected stdout/stderr. 
let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); @@ -639,8 +760,8 @@ mod tests { assert_eq!(contents, "foo\nbaz\n"); } - #[test] - fn test_update_file_hunk_can_move_file() { + #[tokio::test] + async fn test_update_file_hunk_can_move_file() { let dir = tempdir().unwrap(); let src = dir.path().join("src.txt"); let dest = dir.path().join("dst.txt"); @@ -656,7 +777,15 @@ mod tests { )); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); // Validate move semantics and expected stdout/stderr. let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); @@ -673,8 +802,8 @@ mod tests { /// Verify that a single `Update File` hunk with multiple change chunks can update different /// parts of a file and that the file is listed only once in the summary. - #[test] - fn test_multiple_update_chunks_apply_to_single_file() { + #[tokio::test] + async fn test_multiple_update_chunks_apply_to_single_file() { // Start with a file containing four lines. let dir = tempdir().unwrap(); let path = dir.path().join("multi.txt"); @@ -696,7 +825,15 @@ mod tests { )); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); let expected_out = format!( @@ -713,8 +850,8 @@ mod tests { /// replacements in separate chunks that appear in non‑adjacent parts of the /// file. 
Verifies that all edits are applied and that the summary lists the /// file only once. - #[test] - fn test_update_file_hunk_interleaved_changes() { + #[tokio::test] + async fn test_update_file_hunk_interleaved_changes() { let dir = tempdir().unwrap(); let path = dir.path().join("interleaved.txt"); @@ -745,7 +882,15 @@ mod tests { let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); let stdout_str = String::from_utf8(stdout).unwrap(); let stderr_str = String::from_utf8(stderr).unwrap(); @@ -761,8 +906,8 @@ mod tests { assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n"); } - #[test] - fn test_pure_addition_chunk_followed_by_removal() { + #[tokio::test] + async fn test_pure_addition_chunk_followed_by_removal() { let dir = tempdir().unwrap(); let path = dir.path().join("panic.txt"); fs::write(&path, "line1\nline2\nline3\n").unwrap(); @@ -780,7 +925,15 @@ mod tests { )); let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); let contents = fs::read_to_string(path).unwrap(); assert_eq!( contents, @@ -794,8 +947,8 @@ mod tests { /// internal matcher failed requiring an exact byte-for-byte match. The /// fuzzy-matching pass that normalises common punctuation should now bridge /// the gap. 
- #[test] - fn test_update_line_with_unicode_dash() { + #[tokio::test] + async fn test_update_line_with_unicode_dash() { let dir = tempdir().unwrap(); let path = dir.path().join("unicode.py"); @@ -814,7 +967,15 @@ mod tests { let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); // File should now contain the replaced comment. let expected = "import asyncio # HELLO\n"; @@ -833,8 +994,8 @@ mod tests { assert_eq!(String::from_utf8(stderr).unwrap(), ""); } - #[test] - fn test_unified_diff() { + #[tokio::test] + async fn test_unified_diff() { // Start with a file containing four lines. let dir = tempdir().unwrap(); let path = dir.path().join("multi.txt"); @@ -857,7 +1018,10 @@ mod tests { [Hunk::UpdateFile { chunks, .. }] => chunks, _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, update_file_chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, update_file_chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -1,4 +1,4 @@ foo -bar @@ -873,8 +1037,8 @@ mod tests { assert_eq!(expected, diff); } - #[test] - fn test_unified_diff_first_line_replacement() { + #[tokio::test] + async fn test_unified_diff_first_line_replacement() { // Replace the very first line of the file. 
let dir = tempdir().unwrap(); let path = dir.path().join("first.txt"); @@ -896,7 +1060,10 @@ mod tests { _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -1,2 +1,2 @@ -foo +FOO @@ -909,8 +1076,8 @@ mod tests { assert_eq!(expected, diff); } - #[test] - fn test_unified_diff_last_line_replacement() { + #[tokio::test] + async fn test_unified_diff_last_line_replacement() { // Replace the very last line of the file. let dir = tempdir().unwrap(); let path = dir.path().join("last.txt"); @@ -933,7 +1100,10 @@ mod tests { _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -2,2 +2,2 @@ bar -baz @@ -946,8 +1116,8 @@ mod tests { assert_eq!(expected, diff); } - #[test] - fn test_unified_diff_insert_at_eof() { + #[tokio::test] + async fn test_unified_diff_insert_at_eof() { // Insert a new line at end‑of‑file. let dir = tempdir().unwrap(); let path = dir.path().join("insert.txt"); @@ -968,7 +1138,10 @@ mod tests { _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -3 +3,2 @@ baz +quux @@ -980,8 +1153,8 @@ mod tests { assert_eq!(expected, diff); } - #[test] - fn test_unified_diff_interleaved_changes() { + #[tokio::test] + async fn test_unified_diff_interleaved_changes() { // Original file with six lines. 
let dir = tempdir().unwrap(); let path = dir.path().join("interleaved.txt"); @@ -1014,7 +1187,10 @@ mod tests { _ => panic!("Expected a single UpdateFile hunk"), }; - let diff = unified_diff_from_chunks(&path, chunks).unwrap(); + let path_abs = path.as_path().abs(); + let diff = unified_diff_from_chunks(&path_abs, chunks, LOCAL_FS.as_ref()) + .await + .unwrap(); let expected_diff = r#"@@ -1,6 +1,7 @@ a @@ -1037,7 +1213,15 @@ mod tests { let mut stdout = Vec::new(); let mut stderr = Vec::new(); - apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); + apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await + .unwrap(); let contents = fs::read_to_string(path).unwrap(); assert_eq!( contents, @@ -1052,8 +1236,8 @@ g ); } - #[test] - fn test_apply_patch_fails_on_write_error() { + #[tokio::test] + async fn test_apply_patch_fails_on_write_error() { let dir = tempdir().unwrap(); let path = dir.path().join("readonly.txt"); fs::write(&path, "before\n").unwrap(); @@ -1068,7 +1252,14 @@ g let mut stdout = Vec::new(); let mut stderr = Vec::new(); - let result = apply_patch(&patch, &mut stdout, &mut stderr); + let result = apply_patch( + &patch, + &AbsolutePathBuf::from_absolute_path(dir.path()).unwrap(), + &mut stdout, + &mut stderr, + LOCAL_FS.as_ref(), + ) + .await; assert!(result.is_err()); } } diff --git a/codex-rs/apply-patch/src/parser.rs b/codex-rs/apply-patch/src/parser.rs index 274a45497a..24ebcb1463 100644 --- a/codex-rs/apply-patch/src/parser.rs +++ b/codex-rs/apply-patch/src/parser.rs @@ -23,6 +23,9 @@ //! The parser below is a little more lenient than the explicit spec and allows for //! leading/trailing whitespace around patch markers. 
use crate::ApplyPatchArgs; +use codex_utils_absolute_path::AbsolutePathBuf; +#[cfg(test)] +use codex_utils_absolute_path::test_support::PathBufExt; use std::path::Path; use std::path::PathBuf; @@ -76,11 +79,28 @@ pub enum Hunk { } impl Hunk { - pub fn resolve_path(&self, cwd: &Path) -> PathBuf { + pub fn resolve_path(&self, cwd: &AbsolutePathBuf) -> AbsolutePathBuf { + let path = match self { + Hunk::UpdateFile { path, .. } => path, + Hunk::AddFile { .. } | Hunk::DeleteFile { .. } => self.path(), + }; + AbsolutePathBuf::resolve_path_against_base(path, cwd) + } + + /// Returns the path affected by this hunk, using the move destination for rename hunks. + pub fn path(&self) -> &Path { match self { - Hunk::AddFile { path, .. } => cwd.join(path), - Hunk::DeleteFile { path } => cwd.join(path), - Hunk::UpdateFile { path, .. } => cwd.join(path), + Hunk::AddFile { path, .. } => path, + Hunk::DeleteFile { path } => path, + Hunk::UpdateFile { + move_path: Some(path), + .. + } => path, + Hunk::UpdateFile { + path, + move_path: None, + .. 
+ } => path, } } } @@ -583,6 +603,108 @@ fn test_parse_patch() { ); } +#[test] +fn test_parse_patch_accepts_relative_and_absolute_hunk_paths() { + let dir = tempfile::tempdir().unwrap(); + let absolute_delete = dir.path().join("absolute-delete.py").abs(); + let absolute_update = dir.path().join("absolute-update.py").abs(); + let patch_text = format!( + r#"*** Begin Patch +*** Add File: relative-add.py ++content +*** Delete File: {} +*** Update File: {} +@@ +-old ++new +*** End Patch"#, + absolute_delete.display(), + absolute_update.display() + ); + + assert_eq!( + parse_patch_text(&patch_text, ParseMode::Strict) + .unwrap() + .hunks, + vec![ + AddFile { + path: PathBuf::from("relative-add.py"), + contents: "content\n".to_string() + }, + DeleteFile { + path: absolute_delete.to_path_buf() + }, + UpdateFile { + path: absolute_update.to_path_buf(), + move_path: None, + chunks: vec![UpdateFileChunk { + change_context: None, + old_lines: vec!["old".to_string()], + new_lines: vec!["new".to_string()], + is_end_of_file: false + }] + }, + ] + ); +} + +#[test] +fn test_hunk_resolve_path_accepts_relative_and_absolute_paths() { + let cwd_dir = tempfile::tempdir().unwrap(); + let cwd = cwd_dir.path().to_path_buf().abs(); + let absolute_dir = tempfile::tempdir().unwrap(); + let absolute_add = absolute_dir.path().join("absolute-add.py").abs(); + let absolute_delete = absolute_dir.path().join("absolute-delete.py").abs(); + let absolute_update = absolute_dir.path().join("absolute-update.py").abs(); + + for (hunk, expected_path) in [ + ( + AddFile { + path: PathBuf::from("relative-add.py"), + contents: String::new(), + }, + cwd.join("relative-add.py"), + ), + ( + DeleteFile { + path: PathBuf::from("relative-delete.py"), + }, + cwd.join("relative-delete.py"), + ), + ( + UpdateFile { + path: PathBuf::from("relative-update.py"), + move_path: None, + chunks: Vec::new(), + }, + cwd.join("relative-update.py"), + ), + ( + AddFile { + path: absolute_add.to_path_buf(), + contents: 
String::new(), + }, + absolute_add, + ), + ( + DeleteFile { + path: absolute_delete.to_path_buf(), + }, + absolute_delete, + ), + ( + UpdateFile { + path: absolute_update.to_path_buf(), + move_path: None, + chunks: Vec::new(), + }, + absolute_update, + ), + ] { + assert_eq!(hunk.resolve_path(&cwd), expected_path); + } +} + #[test] fn test_parse_patch_lenient() { let patch_text = r#"*** Begin Patch diff --git a/codex-rs/apply-patch/src/standalone_executable.rs b/codex-rs/apply-patch/src/standalone_executable.rs index d77a82fa95..149bfd3382 100644 --- a/codex-rs/apply-patch/src/standalone_executable.rs +++ b/codex-rs/apply-patch/src/standalone_executable.rs @@ -48,7 +48,30 @@ pub fn run_main() -> i32 { let mut stdout = std::io::stdout(); let mut stderr = std::io::stderr(); - match crate::apply_patch(&patch_arg, &mut stdout, &mut stderr) { + let cwd = match codex_utils_absolute_path::AbsolutePathBuf::current_dir() { + Ok(cwd) => cwd, + Err(err) => { + eprintln!("Error: Failed to determine current directory.\n{err}"); + return 1; + } + }; + let runtime = match tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + { + Ok(runtime) => runtime, + Err(err) => { + eprintln!("Error: Failed to initialize runtime.\n{err}"); + return 1; + } + }; + match runtime.block_on(crate::apply_patch( + &patch_arg, + &cwd, + &mut stdout, + &mut stderr, + codex_exec_server::LOCAL_FS.as_ref(), + )) { Ok(()) => { // Flush to ensure output ordering when used in pipelines. 
let _ = stdout.flush(); diff --git a/codex-rs/apply-patch/tests/suite/tool.rs b/codex-rs/apply-patch/tests/suite/tool.rs index 56cd6e57a7..8499d0fb90 100644 --- a/codex-rs/apply-patch/tests/suite/tool.rs +++ b/codex-rs/apply-patch/tests/suite/tool.rs @@ -2,6 +2,7 @@ use assert_cmd::Command; use pretty_assertions::assert_eq; use std::fs; use std::path::Path; +use std::path::PathBuf; use tempfile::tempdir; fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result { @@ -16,9 +17,14 @@ fn apply_patch_command(dir: &Path) -> anyhow::Result { Ok(cmd) } +fn resolved_under(root: &Path, path: &str) -> anyhow::Result { + Ok(root.canonicalize()?.join(path)) +} + #[test] fn test_apply_patch_cli_applies_multiple_operations() -> anyhow::Result<()> { let tmp = tempdir()?; + let add_path = tmp.path().join("nested/new.txt"); let modify_path = tmp.path().join("modify.txt"); let delete_path = tmp.path().join("delete.txt"); @@ -31,10 +37,7 @@ fn test_apply_patch_cli_applies_multiple_operations() -> anyhow::Result<()> { "Success. Updated the following files:\nA nested/new.txt\nM modify.txt\nD delete.txt\n", ); - assert_eq!( - fs::read_to_string(tmp.path().join("nested/new.txt"))?, - "created\n" - ); + assert_eq!(fs::read_to_string(add_path)?, "created\n"); assert_eq!(fs::read_to_string(&modify_path)?, "line1\nchanged\n"); assert!(!delete_path.exists()); @@ -98,13 +101,17 @@ fn test_apply_patch_cli_rejects_empty_patch() -> anyhow::Result<()> { fn test_apply_patch_cli_reports_missing_context() -> anyhow::Result<()> { let tmp = tempdir()?; let target_path = tmp.path().join("modify.txt"); + let expected_target_path = resolved_under(tmp.path(), "modify.txt")?; fs::write(&target_path, "line1\nline2\n")?; apply_patch_command(tmp.path())? 
.arg("*** Begin Patch\n*** Update File: modify.txt\n@@\n-missing\n+changed\n*** End Patch") .assert() .failure() - .stderr("Failed to find expected lines in modify.txt:\nmissing\n"); + .stderr(format!( + "Failed to find expected lines in {}:\nmissing\n", + expected_target_path.display() + )); assert_eq!(fs::read_to_string(&target_path)?, "line1\nline2\n"); Ok(()) @@ -113,12 +120,16 @@ fn test_apply_patch_cli_reports_missing_context() -> anyhow::Result<()> { #[test] fn test_apply_patch_cli_rejects_missing_file_delete() -> anyhow::Result<()> { let tmp = tempdir()?; + let missing_path = resolved_under(tmp.path(), "missing.txt")?; apply_patch_command(tmp.path())? .arg("*** Begin Patch\n*** Delete File: missing.txt\n*** End Patch") .assert() .failure() - .stderr("Failed to delete file missing.txt\n"); + .stderr(format!( + "Failed to delete file {}\n", + missing_path.display() + )); Ok(()) } @@ -139,14 +150,16 @@ fn test_apply_patch_cli_rejects_empty_update_hunk() -> anyhow::Result<()> { #[test] fn test_apply_patch_cli_requires_existing_file_for_update() -> anyhow::Result<()> { let tmp = tempdir()?; + let missing_path = resolved_under(tmp.path(), "missing.txt")?; apply_patch_command(tmp.path())? .arg("*** Begin Patch\n*** Update File: missing.txt\n@@\n-old\n+new\n*** End Patch") .assert() .failure() - .stderr( - "Failed to read file to update missing.txt: No such file or directory (os error 2)\n", - ); + .stderr(format!( + "Failed to read file to update {}: No such file or directory (os error 2)\n", + missing_path.display() + )); Ok(()) } @@ -195,13 +208,18 @@ fn test_apply_patch_cli_add_overwrites_existing_file() -> anyhow::Result<()> { #[test] fn test_apply_patch_cli_delete_directory_fails() -> anyhow::Result<()> { let tmp = tempdir()?; - fs::create_dir(tmp.path().join("dir"))?; + let dir = tmp.path().join("dir"); + let expected_dir = resolved_under(tmp.path(), "dir")?; + fs::create_dir(&dir)?; apply_patch_command(tmp.path())? 
.arg("*** Begin Patch\n*** Delete File: dir\n*** End Patch") .assert() .failure() - .stderr("Failed to delete file dir\n"); + .stderr(format!( + "Failed to delete file {}\n", + expected_dir.display() + )); Ok(()) } @@ -243,13 +261,17 @@ fn test_apply_patch_cli_updates_file_appends_trailing_newline() -> anyhow::Resul fn test_apply_patch_cli_failure_after_partial_success_leaves_changes() -> anyhow::Result<()> { let tmp = tempdir()?; let new_file = tmp.path().join("created.txt"); + let missing_file = resolved_under(tmp.path(), "missing.txt")?; apply_patch_command(tmp.path())? .arg("*** Begin Patch\n*** Add File: created.txt\n+hello\n*** Update File: missing.txt\n@@\n-old\n+new\n*** End Patch") .assert() .failure() .stdout("") - .stderr("Failed to read file to update missing.txt: No such file or directory (os error 2)\n"); + .stderr(format!( + "Failed to read file to update {}: No such file or directory (os error 2)\n", + missing_file.display() + )); assert_eq!(fs::read_to_string(&new_file)?, "hello\n"); diff --git a/codex-rs/arg0/Cargo.toml b/codex-rs/arg0/Cargo.toml index cd409fedd8..8da0fcbd0b 100644 --- a/codex-rs/arg0/Cargo.toml +++ b/codex-rs/arg0/Cargo.toml @@ -14,9 +14,11 @@ workspace = true [dependencies] anyhow = { workspace = true } codex-apply-patch = { workspace = true } +codex-exec-server = { workspace = true } codex-linux-sandbox = { workspace = true } codex-sandboxing = { workspace = true } codex-shell-escalation = { workspace = true } +codex-utils-absolute-path = { workspace = true } codex-utils-home-dir = { workspace = true } dotenvy = { workspace = true } tempfile = { workspace = true } diff --git a/codex-rs/arg0/src/lib.rs b/codex-rs/arg0/src/lib.rs index efad6c2481..f8b61796b5 100644 --- a/codex-rs/arg0/src/lib.rs +++ b/codex-rs/arg0/src/lib.rs @@ -99,7 +99,24 @@ pub fn arg0_dispatch() -> Option { Some(patch_arg) => { let mut stdout = std::io::stdout(); let mut stderr = std::io::stderr(); - match codex_apply_patch::apply_patch(&patch_arg, &mut 
stdout, &mut stderr) { + let cwd = match codex_utils_absolute_path::AbsolutePathBuf::current_dir() { + Ok(cwd) => cwd, + Err(_) => std::process::exit(1), + }; + let runtime = match tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + { + Ok(runtime) => runtime, + Err(_) => std::process::exit(1), + }; + match runtime.block_on(codex_apply_patch::apply_patch( + &patch_arg, + &cwd, + &mut stdout, + &mut stderr, + codex_exec_server::LOCAL_FS.as_ref(), + )) { Ok(()) => 0, Err(_) => 1, } diff --git a/codex-rs/backend-client/src/client.rs b/codex-rs/backend-client/src/client.rs index 3d5beb9d15..6049c1697b 100644 --- a/codex-rs/backend-client/src/client.rs +++ b/codex-rs/backend-client/src/client.rs @@ -503,6 +503,7 @@ impl Client { #[cfg(test)] mod tests { use super::*; + use codex_backend_openapi_models::models::AdditionalRateLimitDetails; use pretty_assertions::assert_eq; #[test] @@ -536,7 +537,7 @@ mod tests { }))), ..Default::default() }))), - additional_rate_limits: Some(Some(vec![crate::types::AdditionalRateLimitDetails { + additional_rate_limits: Some(Some(vec![AdditionalRateLimitDetails { limit_name: "codex_other".to_string(), metered_feature: "codex_other".to_string(), rate_limit: Some(Some(Box::new(crate::types::RateLimitStatusDetails { @@ -596,7 +597,7 @@ mod tests { let payload = RateLimitStatusPayload { plan_type: crate::types::PlanType::Plus, rate_limit: None, - additional_rate_limits: Some(Some(vec![crate::types::AdditionalRateLimitDetails { + additional_rate_limits: Some(Some(vec![AdditionalRateLimitDetails { limit_name: "codex_other".to_string(), metered_feature: "codex_other".to_string(), rate_limit: None, diff --git a/codex-rs/backend-client/src/lib.rs b/codex-rs/backend-client/src/lib.rs index a3b2f37608..397c2a2cd4 100644 --- a/codex-rs/backend-client/src/lib.rs +++ b/codex-rs/backend-client/src/lib.rs @@ -1,5 +1,5 @@ mod client; -pub mod types; +pub(crate) mod types; pub use client::Client; pub use client::RequestError; diff 
--git a/codex-rs/backend-client/src/types.rs b/codex-rs/backend-client/src/types.rs index 4ef0fb8462..22e0984cda 100644 --- a/codex-rs/backend-client/src/types.rs +++ b/codex-rs/backend-client/src/types.rs @@ -1,4 +1,3 @@ -pub use codex_backend_openapi_models::models::AdditionalRateLimitDetails; pub use codex_backend_openapi_models::models::ConfigFileResponse; pub use codex_backend_openapi_models::models::CreditStatusDetails; pub use codex_backend_openapi_models::models::PaginatedListTaskListItem; diff --git a/codex-rs/chatgpt/Cargo.toml b/codex-rs/chatgpt/Cargo.toml index 84c793b536..381b4fc873 100644 --- a/codex-rs/chatgpt/Cargo.toml +++ b/codex-rs/chatgpt/Cargo.toml @@ -11,15 +11,16 @@ workspace = true anyhow = { workspace = true } clap = { workspace = true, features = ["derive"] } codex-connectors = { workspace = true } +codex-config = { workspace = true } codex-core = { workspace = true } +codex-git-utils = { workspace = true } codex-login = { workspace = true } codex-utils-cli = { workspace = true } -codex-utils-cargo-bin = { workspace = true } serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } tokio = { workspace = true, features = ["full"] } -codex-git-utils = { workspace = true } [dev-dependencies] +codex-utils-cargo-bin = { workspace = true } pretty_assertions = { workspace = true } +serde_json = { workspace = true } tempfile = { workspace = true } diff --git a/codex-rs/chatgpt/src/chatgpt_token.rs b/codex-rs/chatgpt/src/chatgpt_token.rs index 4f6c492a43..d20a7e57c4 100644 --- a/codex-rs/chatgpt/src/chatgpt_token.rs +++ b/codex-rs/chatgpt/src/chatgpt_token.rs @@ -1,4 +1,4 @@ -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::AuthManager; use codex_login::token_data::TokenData; use std::path::Path; diff --git a/codex-rs/chatgpt/src/connectors.rs b/codex-rs/chatgpt/src/connectors.rs index 8ba12b3460..1ea293f974 100644 --- a/codex-rs/chatgpt/src/connectors.rs 
+++ b/codex-rs/chatgpt/src/connectors.rs @@ -1,5 +1,6 @@ use codex_core::config::Config; use codex_login::AuthManager; +use codex_login::CodexAuth; use codex_login::token_data::TokenData; use std::collections::HashSet; use std::time::Duration; @@ -32,7 +33,10 @@ async fn apps_enabled(config: &Config) -> bool { /*enable_codex_api_key_env*/ false, config.cli_auth_credentials_store_mode, ); - config.features.apps_enabled(Some(&auth_manager)).await + let auth = auth_manager.auth().await; + config + .features + .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::is_chatgpt_auth)) } pub async fn list_connectors(config: &Config) -> anyhow::Result> { if !apps_enabled(config).await { diff --git a/codex-rs/cli/src/lib.rs b/codex-rs/cli/src/lib.rs index b6174efa3a..848391293c 100644 --- a/codex-rs/cli/src/lib.rs +++ b/codex-rs/cli/src/lib.rs @@ -1,10 +1,21 @@ -pub mod debug_sandbox; +pub(crate) mod debug_sandbox; mod exit_status; -pub mod login; +pub(crate) mod login; use clap::Parser; use codex_utils_cli::CliConfigOverrides; +pub use debug_sandbox::run_command_under_landlock; +pub use debug_sandbox::run_command_under_seatbelt; +pub use debug_sandbox::run_command_under_windows; +pub use login::read_api_key_from_stdin; +pub use login::run_login_status; +pub use login::run_login_with_api_key; +pub use login::run_login_with_chatgpt; +pub use login::run_login_with_device_code; +pub use login::run_login_with_device_code_fallback_to_browser; +pub use login::run_logout; + #[derive(Debug, Parser)] pub struct SeatbeltCommand { /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs index ced9d4931c..9fa7dc4508 100644 --- a/codex-rs/cli/src/login.rs +++ b/codex-rs/cli/src/login.rs @@ -8,8 +8,8 @@ //! support can request from users. 
use codex_app_server_protocol::AuthMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_core::config::Config; -use codex_login::AuthCredentialsStoreMode; use codex_login::CLIENT_ID; use codex_login::CodexAuth; use codex_login::ServerOptions; diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 1c4f5068fd..24dd558ea7 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -10,12 +10,12 @@ use codex_chatgpt::apply_command::run_apply_command; use codex_cli::LandlockCommand; use codex_cli::SeatbeltCommand; use codex_cli::WindowsCommand; -use codex_cli::login::read_api_key_from_stdin; -use codex_cli::login::run_login_status; -use codex_cli::login::run_login_with_api_key; -use codex_cli::login::run_login_with_chatgpt; -use codex_cli::login::run_login_with_device_code; -use codex_cli::login::run_logout; +use codex_cli::read_api_key_from_stdin; +use codex_cli::run_login_status; +use codex_cli::run_login_with_api_key; +use codex_cli::run_login_with_chatgpt; +use codex_cli::run_login_with_device_code; +use codex_cli::run_logout; use codex_cloud_tasks::Cli as CloudTasksCli; use codex_exec::Cli as ExecCli; use codex_exec::Command as ExecCommand; @@ -27,7 +27,7 @@ use codex_state::state_db_path; use codex_tui::AppExitInfo; use codex_tui::Cli as TuiCli; use codex_tui::ExitReason; -use codex_tui::update_action::UpdateAction; +use codex_tui::UpdateAction; use codex_utils_cli::CliConfigOverrides; use owo_colors::OwoColorize; use std::io::IsTerminal; @@ -346,7 +346,7 @@ struct AppServerCommand { subcommand: Option, /// Transport endpoint URL. Supported values: `stdio://` (default), - /// `ws://IP:PORT`. + /// `ws://IP:PORT`, `off`. 
#[arg( long = "listen", value_name = "URL", @@ -438,15 +438,14 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec anyhow::Result<()> { &mut seatbelt_cli.config_overrides, root_config_overrides.clone(), ); - codex_cli::debug_sandbox::run_command_under_seatbelt( + codex_cli::run_command_under_seatbelt( seatbelt_cli, arg0_paths.codex_linux_sandbox_exe.clone(), ) @@ -899,7 +898,7 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { &mut landlock_cli.config_overrides, root_config_overrides.clone(), ); - codex_cli::debug_sandbox::run_command_under_landlock( + codex_cli::run_command_under_landlock( landlock_cli, arg0_paths.codex_linux_sandbox_exe.clone(), ) @@ -915,7 +914,7 @@ async fn cli_main(arg0_paths: Arg0DispatchPaths) -> anyhow::Result<()> { &mut windows_cli.config_overrides, root_config_overrides.clone(), ); - codex_cli::debug_sandbox::run_command_under_windows( + codex_cli::run_command_under_windows( windows_cli, arg0_paths.codex_linux_sandbox_exe.clone(), ) @@ -1173,7 +1172,7 @@ async fn run_debug_prompt_input_command( }); } - let prompt_input = codex_core::prompt_debug::build_prompt_input(config, input).await?; + let prompt_input = codex_core::build_prompt_input(config, input).await?; println!("{}", serde_json::to_string_pretty(&prompt_input)?); Ok(()) @@ -1993,6 +1992,12 @@ mod tests { ); } + #[test] + fn app_server_listen_off_parses() { + let app_server = app_server_from_args(["codex", "app-server", "--listen", "off"].as_ref()); + assert_eq!(app_server.listen, codex_app_server::AppServerTransport::Off); + } + #[test] fn app_server_listen_invalid_url_fails_to_parse() { let parse_result = diff --git a/codex-rs/cli/src/mcp_cmd.rs b/codex-rs/cli/src/mcp_cmd.rs index 144316b50e..f544ca82b7 100644 --- a/codex-rs/cli/src/mcp_cmd.rs +++ b/codex-rs/cli/src/mcp_cmd.rs @@ -8,24 +8,24 @@ use anyhow::bail; use clap::ArgGroup; use codex_config::types::McpServerConfig; use codex_config::types::McpServerTransportConfig; +use 
codex_core::McpManager; use codex_core::config::Config; use codex_core::config::edit::ConfigEditsBuilder; use codex_core::config::find_codex_home; use codex_core::config::load_global_mcp_servers; -use codex_core::mcp::McpManager; use codex_core::plugins::PluginsManager; -use codex_mcp::mcp::auth::McpOAuthLoginSupport; -use codex_mcp::mcp::auth::ResolvedMcpOAuthScopes; -use codex_mcp::mcp::auth::compute_auth_statuses; -use codex_mcp::mcp::auth::discover_supported_scopes; -use codex_mcp::mcp::auth::oauth_login_support; -use codex_mcp::mcp::auth::resolve_oauth_scopes; -use codex_mcp::mcp::auth::should_retry_without_scopes; +use codex_mcp::McpOAuthLoginSupport; +use codex_mcp::ResolvedMcpOAuthScopes; +use codex_mcp::compute_auth_statuses; +use codex_mcp::discover_supported_scopes; +use codex_mcp::oauth_login_support; +use codex_mcp::resolve_oauth_scopes; +use codex_mcp::should_retry_without_scopes; use codex_protocol::protocol::McpAuthStatus; use codex_rmcp_client::delete_oauth_tokens; use codex_rmcp_client::perform_oauth_login; use codex_utils_cli::CliConfigOverrides; -use codex_utils_cli::format_env_display::format_env_display; +use codex_utils_cli::format_env_display; /// Subcommands: /// - `list` — list configured servers (with `--json`) @@ -194,7 +194,7 @@ impl McpCli { async fn perform_oauth_login_retry_without_scopes( name: &str, url: &str, - store_mode: codex_rmcp_client::OAuthCredentialsStoreMode, + store_mode: codex_config::types::OAuthCredentialsStoreMode, http_headers: Option>, env_http_headers: Option>, resolved_scopes: &ResolvedMcpOAuthScopes, diff --git a/codex-rs/cli/src/wsl_paths.rs b/codex-rs/cli/src/wsl_paths.rs index 28366579e5..b90dc1e283 100644 --- a/codex-rs/cli/src/wsl_paths.rs +++ b/codex-rs/cli/src/wsl_paths.rs @@ -1,7 +1,7 @@ use std::ffi::OsStr; /// Returns true if the current process is running under WSL. 
-pub use codex_utils_path::env::is_wsl; +pub use codex_utils_path::is_wsl; /// Convert a Windows absolute path (`C:\foo\bar` or `C:/foo/bar`) to a WSL mount path (`/mnt/c/foo/bar`). /// Returns `None` if the input does not look like a Windows drive path. diff --git a/codex-rs/cloud-requirements/Cargo.toml b/codex-rs/cloud-requirements/Cargo.toml index 9eda7a7c36..59f8741cdb 100644 --- a/codex-rs/cloud-requirements/Cargo.toml +++ b/codex-rs/cloud-requirements/Cargo.toml @@ -12,6 +12,7 @@ async-trait = { workspace = true } base64 = { workspace = true } chrono = { workspace = true, features = ["serde"] } codex-backend-client = { workspace = true } +codex-config = { workspace = true } codex-core = { workspace = true } codex-login = { workspace = true } codex-otel = { workspace = true } diff --git a/codex-rs/cloud-requirements/src/lib.rs b/codex-rs/cloud-requirements/src/lib.rs index f2916c2e9b..ad4ebed12f 100644 --- a/codex-rs/cloud-requirements/src/lib.rs +++ b/codex-rs/cloud-requirements/src/lib.rs @@ -15,12 +15,12 @@ use chrono::DateTime; use chrono::Duration as ChronoDuration; use chrono::Utc; use codex_backend_client::Client as BackendClient; +use codex_config::types::AuthCredentialsStoreMode; use codex_core::config_loader::CloudRequirementsLoadError; use codex_core::config_loader::CloudRequirementsLoadErrorCode; use codex_core::config_loader::CloudRequirementsLoader; use codex_core::config_loader::ConfigRequirementsToml; use codex_core::util::backoff; -use codex_login::AuthCredentialsStoreMode; use codex_login::AuthManager; use codex_login::CodexAuth; use codex_login::RefreshTokenError; @@ -806,7 +806,7 @@ fn status_code_tag(status_code: Option) -> String { } fn emit_metric(metric_name: &str, tags: Vec<(&str, String)>) { - if let Some(metrics) = codex_otel::metrics::global() { + if let Some(metrics) = codex_otel::global() { let tag_refs = tags .iter() .map(|(key, value)| (*key, value.as_str())) @@ -820,7 +820,7 @@ mod tests { use super::*; use base64::Engine; use 
base64::engine::general_purpose::URL_SAFE_NO_PAD; - use codex_login::AuthCredentialsStoreMode; + use codex_config::types::AuthCredentialsStoreMode; use codex_protocol::protocol::AskForApproval; use pretty_assertions::assert_eq; use serde_json::json; diff --git a/codex-rs/cloud-tasks/src/lib.rs b/codex-rs/cloud-tasks/src/lib.rs index f96ac62ed9..7006d52b92 100644 --- a/codex-rs/cloud-tasks/src/lib.rs +++ b/codex-rs/cloud-tasks/src/lib.rs @@ -1,10 +1,10 @@ mod app; mod cli; -pub mod env_detect; +pub(crate) mod env_detect; mod new_task; -pub mod scrollable_diff; +pub(crate) mod scrollable_diff; mod ui; -pub mod util; +pub(crate) mod util; pub use cli::Cli; use anyhow::anyhow; @@ -1596,7 +1596,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option) -> an let total = ov.attempt_display_total(); let current = ov.selected_attempt + 1; app.status = format!("Viewing attempt {current} of {total}"); - ov.sd.to_top(); + ov.sd.scroll_to_top(); needs_redraw = true; } }; @@ -1672,7 +1672,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option) -> an let has_diff = ov.current_attempt().is_some_and(app::AttemptView::has_diff) || ov.base_can_apply; if has_text && has_diff { ov.set_view(app::DetailView::Prompt); - ov.sd.to_top(); + ov.sd.scroll_to_top(); needs_redraw = true; } } @@ -1683,7 +1683,7 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option) -> an let has_diff = ov.current_attempt().is_some_and(app::AttemptView::has_diff) || ov.base_can_apply; if has_text && has_diff { ov.set_view(app::DetailView::Diff); - ov.sd.to_top(); + ov.sd.scroll_to_top(); needs_redraw = true; } } @@ -1714,8 +1714,8 @@ pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option) -> an if let Some(ov) = &mut app.diff_overlay { let step = ov.sd.state.viewport_h.saturating_sub(1) as i16; ov.sd.page_by(-step); } needs_redraw = true; } - KeyCode::Home => { if let Some(ov) = &mut app.diff_overlay { ov.sd.to_top(); } needs_redraw = true; } - KeyCode::End => { 
if let Some(ov) = &mut app.diff_overlay { ov.sd.to_bottom(); } needs_redraw = true; } + KeyCode::Home => { if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_to_top(); } needs_redraw = true; } + KeyCode::End => { if let Some(ov) = &mut app.diff_overlay { ov.sd.scroll_to_bottom(); } needs_redraw = true; } _ => {} } } else if app.env_modal.is_some() { diff --git a/codex-rs/cloud-tasks/src/scrollable_diff.rs b/codex-rs/cloud-tasks/src/scrollable_diff.rs index 97dfb24895..59dd076b36 100644 --- a/codex-rs/cloud-tasks/src/scrollable_diff.rs +++ b/codex-rs/cloud-tasks/src/scrollable_diff.rs @@ -86,11 +86,11 @@ impl ScrollableDiff { self.scroll_by(delta); } - pub fn to_top(&mut self) { + pub fn scroll_to_top(&mut self) { self.state.scroll = 0; } - pub fn to_bottom(&mut self) { + pub fn scroll_to_bottom(&mut self) { self.state.scroll = self.max_scroll(); } diff --git a/codex-rs/code-mode/src/description.rs b/codex-rs/code-mode/src/description.rs index 3a87fef092..76ede634ba 100644 --- a/codex-rs/code-mode/src/description.rs +++ b/codex-rs/code-mode/src/description.rs @@ -26,6 +26,8 @@ const EXEC_DESCRIPTION_TEMPLATE: &str = r#"Run JavaScript code to orchestrate/co - `store(key: string, value: any)`: stores a serializable value under a string key for later `exec` calls in the same session. - `load(key: string)`: returns the stored value for a string key, or `undefined` if it is missing. - `notify(value: string | number | boolean | undefined | null)`: immediately injects an extra `custom_tool_call_output` for the current `exec` call. Values are stringified like `text(...)`. +- `setTimeout(callback: () => void, delayMs?: number)`: schedules a callback to run later and returns a timeout id. Pending timeouts do not keep `exec` alive by themselves; await an explicit promise if you need to wait for one. +- `clearTimeout(timeoutId?: number)`: cancels a timeout created by `setTimeout`. - `ALL_TOOLS`: metadata for the enabled nested tools as `{ name, description }` entries. 
- `yield_control()`: yields the accumulated output to the model immediately while the script keeps running."#; const WAIT_DESCRIPTION_TEMPLATE: &str = r#"- Use `wait` only after `exec` returns `Script running with cell ID ...`. @@ -556,4 +558,11 @@ mod tests { ); assert!(description.contains("### `foo` (`foo`)")); } + + #[test] + fn exec_description_mentions_timeout_helpers() { + let description = build_exec_tool_description(&[], /*code_mode_only*/ false); + assert!(description.contains("`setTimeout(callback: () => void, delayMs?: number)`")); + assert!(description.contains("`clearTimeout(timeoutId?: number)`")); + } } diff --git a/codex-rs/code-mode/src/runtime/callbacks.rs b/codex-rs/code-mode/src/runtime/callbacks.rs index b77ae82d68..5511baca23 100644 --- a/codex-rs/code-mode/src/runtime/callbacks.rs +++ b/codex-rs/code-mode/src/runtime/callbacks.rs @@ -3,6 +3,7 @@ use crate::response::FunctionCallOutputContentItem; use super::EXIT_SENTINEL; use super::RuntimeEvent; use super::RuntimeState; +use super::timers; use super::value::json_to_v8; use super::value::normalize_output_image; use super::value::serialize_output_text; @@ -185,6 +186,35 @@ pub(super) fn notify_callback( retval.set(v8::undefined(scope).into()); } +pub(super) fn set_timeout_callback( + scope: &mut v8::PinScope<'_, '_>, + args: v8::FunctionCallbackArguments, + mut retval: v8::ReturnValue, +) { + let timeout_id = match timers::schedule_timeout(scope, args) { + Ok(timeout_id) => timeout_id, + Err(error_text) => { + throw_type_error(scope, &error_text); + return; + } + }; + + retval.set(v8::Number::new(scope, timeout_id as f64).into()); +} + +pub(super) fn clear_timeout_callback( + scope: &mut v8::PinScope<'_, '_>, + args: v8::FunctionCallbackArguments, + mut retval: v8::ReturnValue, +) { + if let Err(error_text) = timers::clear_timeout(scope, args) { + throw_type_error(scope, &error_text); + return; + } + + retval.set(v8::undefined(scope).into()); +} + pub(super) fn yield_control_callback( scope: 
&mut v8::PinScope<'_, '_>, _args: v8::FunctionCallbackArguments, diff --git a/codex-rs/code-mode/src/runtime/globals.rs b/codex-rs/code-mode/src/runtime/globals.rs index 371479497b..2d419db908 100644 --- a/codex-rs/code-mode/src/runtime/globals.rs +++ b/codex-rs/code-mode/src/runtime/globals.rs @@ -1,8 +1,10 @@ use super::RuntimeState; +use super::callbacks::clear_timeout_callback; use super::callbacks::exit_callback; use super::callbacks::image_callback; use super::callbacks::load_callback; use super::callbacks::notify_callback; +use super::callbacks::set_timeout_callback; use super::callbacks::store_callback; use super::callbacks::text_callback; use super::callbacks::tool_callback; @@ -18,6 +20,8 @@ pub(super) fn install_globals(scope: &mut v8::PinScope<'_, '_>) -> Result<(), St let tools = build_tools_object(scope)?; let all_tools = build_all_tools_value(scope)?; + let clear_timeout = helper_function(scope, "clearTimeout", clear_timeout_callback)?; + let set_timeout = helper_function(scope, "setTimeout", set_timeout_callback)?; let text = helper_function(scope, "text", text_callback)?; let image = helper_function(scope, "image", image_callback)?; let store = helper_function(scope, "store", store_callback)?; @@ -28,6 +32,8 @@ pub(super) fn install_globals(scope: &mut v8::PinScope<'_, '_>) -> Result<(), St set_global(scope, global, "tools", tools.into())?; set_global(scope, global, "ALL_TOOLS", all_tools)?; + set_global(scope, global, "clearTimeout", clear_timeout.into())?; + set_global(scope, global, "setTimeout", set_timeout.into())?; set_global(scope, global, "text", text.into())?; set_global(scope, global, "image", image.into())?; set_global(scope, global, "store", store.into())?; diff --git a/codex-rs/code-mode/src/runtime/mod.rs b/codex-rs/code-mode/src/runtime/mod.rs index df90eda673..411f81bddc 100644 --- a/codex-rs/code-mode/src/runtime/mod.rs +++ b/codex-rs/code-mode/src/runtime/mod.rs @@ -1,6 +1,7 @@ mod callbacks; mod globals; mod module_loader; +mod 
timers; mod value; use std::collections::HashMap; @@ -75,6 +76,7 @@ pub(crate) enum TurnMessage { pub(crate) enum RuntimeCommand { ToolResponse { id: String, result: JsonValue }, ToolError { id: String, error_text: String }, + TimeoutFired { id: u64 }, Terminate, } @@ -103,6 +105,7 @@ pub(crate) fn spawn_runtime( event_tx: mpsc::UnboundedSender, ) -> Result<(std_mpsc::Sender, v8::IsolateHandle), String> { let (command_tx, command_rx) = std_mpsc::channel(); + let runtime_command_tx = command_tx.clone(); let (isolate_handle_tx, isolate_handle_rx) = std_mpsc::sync_channel(1); let enabled_tools = request .enabled_tools @@ -117,7 +120,13 @@ pub(crate) fn spawn_runtime( }; thread::spawn(move || { - run_runtime(config, event_tx, command_rx, isolate_handle_tx); + run_runtime( + config, + event_tx, + command_rx, + isolate_handle_tx, + runtime_command_tx, + ); }); let isolate_handle = isolate_handle_rx @@ -137,10 +146,13 @@ struct RuntimeConfig { pub(super) struct RuntimeState { event_tx: mpsc::UnboundedSender, pending_tool_calls: HashMap>, + pending_timeouts: HashMap, stored_values: HashMap, enabled_tools: Vec, next_tool_call_id: u64, + next_timeout_id: u64, tool_call_id: String, + runtime_command_tx: std_mpsc::Sender, exit_requested: bool, } @@ -168,6 +180,7 @@ fn run_runtime( event_tx: mpsc::UnboundedSender, command_rx: std_mpsc::Receiver, isolate_handle_tx: std_mpsc::SyncSender, + runtime_command_tx: std_mpsc::Sender, ) { initialize_v8(); @@ -185,10 +198,13 @@ fn run_runtime( scope.set_slot(RuntimeState { event_tx: event_tx.clone(), pending_tool_calls: HashMap::new(), + pending_timeouts: HashMap::new(), stored_values: config.stored_values, enabled_tools: config.enabled_tools, next_tool_call_id: 1, + next_timeout_id: 1, tool_call_id: config.tool_call_id, + runtime_command_tx, exit_requested: false, }); @@ -223,6 +239,7 @@ fn run_runtime( let Ok(command) = command_rx.recv() else { break; }; + match command { RuntimeCommand::Terminate => break, RuntimeCommand::ToolResponse 
{ id, result } => { @@ -241,6 +258,12 @@ fn run_runtime( return; } } + RuntimeCommand::TimeoutFired { id } => { + if let Err(runtime_error) = timers::invoke_timeout_callback(scope, id) { + capture_scope_send_error(scope, &event_tx, Some(runtime_error)); + return; + } + } } scope.perform_microtask_checkpoint(); diff --git a/codex-rs/code-mode/src/runtime/timers.rs b/codex-rs/code-mode/src/runtime/timers.rs new file mode 100644 index 0000000000..01c414cefe --- /dev/null +++ b/codex-rs/code-mode/src/runtime/timers.rs @@ -0,0 +1,114 @@ +use std::thread; +use std::time::Duration; + +use super::RuntimeCommand; +use super::RuntimeState; +use super::value::value_to_error_text; + +pub(super) struct ScheduledTimeout { + callback: v8::Global, +} + +pub(super) fn schedule_timeout( + scope: &mut v8::PinScope<'_, '_>, + args: v8::FunctionCallbackArguments, +) -> Result { + let callback = args.get(0); + if !callback.is_function() { + return Err("setTimeout expects a function callback".to_string()); + } + let callback = v8::Local::::try_from(callback) + .map_err(|_| "setTimeout expects a function callback".to_string())?; + + let delay_ms = args + .get(1) + .number_value(scope) + .map(normalize_delay_ms) + .unwrap_or(0); + + let callback = v8::Global::new(scope, callback); + let state = scope + .get_slot_mut::() + .ok_or_else(|| "runtime state unavailable".to_string())?; + let timeout_id = state.next_timeout_id; + state.next_timeout_id = state.next_timeout_id.saturating_add(1); + let runtime_command_tx = state.runtime_command_tx.clone(); + state + .pending_timeouts + .insert(timeout_id, ScheduledTimeout { callback }); + thread::spawn(move || { + thread::sleep(Duration::from_millis(delay_ms)); + let _ = runtime_command_tx.send(RuntimeCommand::TimeoutFired { id: timeout_id }); + }); + + Ok(timeout_id) +} + +pub(super) fn clear_timeout( + scope: &mut v8::PinScope<'_, '_>, + args: v8::FunctionCallbackArguments, +) -> Result<(), String> { + let Some(timeout_id) = 
timeout_id_from_args(scope, args)? else { + return Ok(()); + }; + + let Some(state) = scope.get_slot_mut::() else { + return Err("runtime state unavailable".to_string()); + }; + state.pending_timeouts.remove(&timeout_id); + Ok(()) +} + +pub(super) fn invoke_timeout_callback( + scope: &mut v8::PinScope<'_, '_>, + timeout_id: u64, +) -> Result<(), String> { + let callback = { + let state = scope + .get_slot_mut::() + .ok_or_else(|| "runtime state unavailable".to_string())?; + state.pending_timeouts.remove(&timeout_id) + }; + let Some(callback) = callback else { + return Ok(()); + }; + + let tc = std::pin::pin!(v8::TryCatch::new(scope)); + let mut tc = tc.init(); + let callback = v8::Local::new(&tc, &callback.callback); + let receiver = v8::undefined(&tc).into(); + let _ = callback.call(&tc, receiver, &[]); + if tc.has_caught() { + return Err(tc + .exception() + .map(|exception| value_to_error_text(&mut tc, exception)) + .unwrap_or_else(|| "unknown code mode exception".to_string())); + } + + Ok(()) +} +fn timeout_id_from_args( + scope: &mut v8::PinScope<'_, '_>, + args: v8::FunctionCallbackArguments, +) -> Result, String> { + if args.length() == 0 || args.get(0).is_null_or_undefined() { + return Ok(None); + } + + let Some(timeout_id) = args.get(0).number_value(scope) else { + return Err("clearTimeout expects a numeric timeout id".to_string()); + }; + if !timeout_id.is_finite() || timeout_id <= 0.0 { + return Ok(None); + } + + Ok(Some(timeout_id.trunc().min(u64::MAX as f64) as u64)) +} + +fn normalize_delay_ms(delay_ms: f64) -> u64 { + if !delay_ms.is_finite() || delay_ms <= 0.0 { + 0 + } else { + delay_ms.trunc().min(u64::MAX as f64) as u64 + } +} diff --git a/codex-rs/codex-api/src/common.rs b/codex-rs/codex-api/src/common.rs index 39fb976e67..6a88cd1f47 100644 --- a/codex-rs/codex-api/src/common.rs +++ b/codex-rs/codex-api/src/common.rs @@ -153,7 +153,8 @@ impl From for OpenAiVerbosity { #[derive(Debug, Serialize, Clone, PartialEq)] pub struct ResponsesApiRequest { 
pub model: String, - pub instructions: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub instructions: Option, pub input: Vec, pub tools: Vec, pub tool_choice: String, @@ -168,6 +169,8 @@ pub struct ResponsesApiRequest { pub prompt_cache_key: Option, #[serde(skip_serializing_if = "Option::is_none")] pub text: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub client_metadata: Option>, } impl From<&ResponsesApiRequest> for ResponseCreateWsRequest { @@ -188,7 +191,7 @@ impl From<&ResponsesApiRequest> for ResponseCreateWsRequest { prompt_cache_key: request.prompt_cache_key.clone(), text: request.text.clone(), generate: None, - client_metadata: None, + client_metadata: request.client_metadata.clone(), } } } @@ -196,7 +199,8 @@ impl From<&ResponsesApiRequest> for ResponseCreateWsRequest { #[derive(Debug, Serialize)] pub struct ResponseCreateWsRequest { pub model: String, - pub instructions: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub instructions: Option, #[serde(skip_serializing_if = "Option::is_none")] pub previous_response_id: Option, pub input: Vec, diff --git a/codex-rs/codex-api/src/endpoint/mod.rs b/codex-rs/codex-api/src/endpoint/mod.rs index 6a748e533d..a6c2603f70 100644 --- a/codex-rs/codex-api/src/endpoint/mod.rs +++ b/codex-rs/codex-api/src/endpoint/mod.rs @@ -1,7 +1,22 @@ -pub mod compact; -pub mod memories; -pub mod models; -pub mod realtime_websocket; -pub mod responses; -pub mod responses_websocket; +pub(crate) mod compact; +pub(crate) mod memories; +pub(crate) mod models; +pub(crate) mod realtime_websocket; +pub(crate) mod responses; +pub(crate) mod responses_websocket; mod session; + +pub use compact::CompactClient; +pub use memories::MemoriesClient; +pub use models::ModelsClient; +pub use realtime_websocket::RealtimeEventParser; +pub use realtime_websocket::RealtimeSessionConfig; +pub use realtime_websocket::RealtimeSessionMode; +pub use realtime_websocket::RealtimeWebsocketClient; +pub use 
realtime_websocket::RealtimeWebsocketConnection; +pub use realtime_websocket::RealtimeWebsocketEvents; +pub use realtime_websocket::RealtimeWebsocketWriter; +pub use responses::ResponsesClient; +pub use responses::ResponsesOptions; +pub use responses_websocket::ResponsesWebsocketClient; +pub use responses_websocket::ResponsesWebsocketConnection; diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs index eed0e470a4..544c7ad7a5 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/methods.rs @@ -625,9 +625,9 @@ fn normalize_realtime_path(url: &mut Url) { #[cfg(test)] mod tests { use super::*; - use crate::endpoint::realtime_websocket::protocol::RealtimeHandoffRequested; use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptDelta; use crate::endpoint::realtime_websocket::protocol::RealtimeTranscriptEntry; + use codex_protocol::protocol::RealtimeHandoffRequested; use codex_protocol::protocol::RealtimeInputAudioSpeechStarted; use codex_protocol::protocol::RealtimeResponseCancelled; use http::HeaderValue; diff --git a/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs index d13585034a..867ebdff65 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/mod.rs @@ -1,14 +1,12 @@ -pub mod methods; +pub(crate) mod methods; mod methods_common; mod methods_v1; mod methods_v2; -pub mod protocol; +pub(crate) mod protocol; mod protocol_common; mod protocol_v1; mod protocol_v2; -pub use codex_protocol::protocol::RealtimeAudioFrame; -pub use codex_protocol::protocol::RealtimeEvent; pub use methods::RealtimeWebsocketClient; pub use methods::RealtimeWebsocketConnection; pub use methods::RealtimeWebsocketEvents; diff --git 
a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs index 2c629249fa..4941740556 100644 --- a/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs +++ b/codex-rs/codex-api/src/endpoint/realtime_websocket/protocol.rs @@ -2,7 +2,6 @@ use crate::endpoint::realtime_websocket::protocol_v1::parse_realtime_event_v1; use crate::endpoint::realtime_websocket::protocol_v2::parse_realtime_event_v2; pub use codex_protocol::protocol::RealtimeAudioFrame; pub use codex_protocol::protocol::RealtimeEvent; -pub use codex_protocol::protocol::RealtimeHandoffRequested; pub use codex_protocol::protocol::RealtimeTranscriptDelta; pub use codex_protocol::protocol::RealtimeTranscriptEntry; use serde::Serialize; diff --git a/codex-rs/codex-api/src/endpoint/responses.rs b/codex-rs/codex-api/src/endpoint/responses.rs index 57a44d2e27..8e0e7384fd 100644 --- a/codex-rs/codex-api/src/endpoint/responses.rs +++ b/codex-rs/codex-api/src/endpoint/responses.rs @@ -4,11 +4,11 @@ use crate::common::ResponsesApiRequest; use crate::endpoint::session::EndpointSession; use crate::error::ApiError; use crate::provider::Provider; +use crate::requests::Compression; +use crate::requests::attach_item_ids; use crate::requests::headers::build_conversation_headers; use crate::requests::headers::insert_header; use crate::requests::headers::subagent_header; -use crate::requests::responses::Compression; -use crate::requests::responses::attach_item_ids; use crate::sse::spawn_response_stream; use crate::telemetry::SseTelemetry; use codex_client::HttpTransport; diff --git a/codex-rs/codex-api/src/endpoint/responses_websocket.rs b/codex-rs/codex-api/src/endpoint/responses_websocket.rs index d3b578db69..a60d188bda 100644 --- a/codex-rs/codex-api/src/endpoint/responses_websocket.rs +++ b/codex-rs/codex-api/src/endpoint/responses_websocket.rs @@ -6,8 +6,8 @@ use crate::common::ResponsesWsRequest; use crate::error::ApiError; use 
crate::provider::Provider; use crate::rate_limits::parse_rate_limit_event; -use crate::sse::responses::ResponsesStreamEvent; -use crate::sse::responses::process_responses_event; +use crate::sse::ResponsesStreamEvent; +use crate::sse::process_responses_event; use crate::telemetry::WebsocketTelemetry; use codex_client::TransportError; use codex_client::maybe_build_rustls_client_config_with_custom_ca; diff --git a/codex-rs/codex-api/src/lib.rs b/codex-rs/codex-api/src/lib.rs index c7bb607059..01a5637356 100644 --- a/codex-rs/codex-api/src/lib.rs +++ b/codex-rs/codex-api/src/lib.rs @@ -1,48 +1,58 @@ -pub mod api_bridge; -pub mod auth; -pub mod common; -pub mod endpoint; -pub mod error; -pub mod provider; -pub mod rate_limits; -pub mod requests; -pub mod sse; -pub mod telemetry; +pub(crate) mod api_bridge; +pub(crate) mod auth; +pub(crate) mod common; +pub(crate) mod endpoint; +pub(crate) mod error; +pub(crate) mod provider; +pub(crate) mod rate_limits; +pub(crate) mod requests; +pub(crate) mod sse; +pub(crate) mod telemetry; pub use crate::requests::headers::build_conversation_headers; pub use codex_client::RequestTelemetry; pub use codex_client::ReqwestTransport; pub use codex_client::TransportError; +pub use crate::api_bridge::CoreAuthProvider; +pub use crate::api_bridge::map_api_error; pub use crate::auth::AuthProvider; pub use crate::common::CompactionInput; pub use crate::common::MemorySummarizeInput; pub use crate::common::MemorySummarizeOutput; +pub use crate::common::OpenAiVerbosity; pub use crate::common::RawMemory; pub use crate::common::RawMemoryMetadata; +pub use crate::common::Reasoning; pub use crate::common::ResponseCreateWsRequest; pub use crate::common::ResponseEvent; pub use crate::common::ResponseStream; pub use crate::common::ResponsesApiRequest; +pub use crate::common::ResponsesWsRequest; +pub use crate::common::TextControls; pub use crate::common::WS_REQUEST_HEADER_TRACEPARENT_CLIENT_METADATA_KEY; pub use 
crate::common::WS_REQUEST_HEADER_TRACESTATE_CLIENT_METADATA_KEY; pub use crate::common::create_text_param_for_request; pub use crate::common::response_create_client_metadata; -pub use crate::endpoint::compact::CompactClient; -pub use crate::endpoint::memories::MemoriesClient; -pub use crate::endpoint::models::ModelsClient; -pub use crate::endpoint::realtime_websocket::RealtimeEventParser; -pub use crate::endpoint::realtime_websocket::RealtimeSessionConfig; -pub use crate::endpoint::realtime_websocket::RealtimeSessionMode; -pub use crate::endpoint::realtime_websocket::RealtimeWebsocketClient; -pub use crate::endpoint::realtime_websocket::RealtimeWebsocketConnection; -pub use crate::endpoint::responses::ResponsesClient; -pub use crate::endpoint::responses::ResponsesOptions; -pub use crate::endpoint::responses_websocket::ResponsesWebsocketClient; -pub use crate::endpoint::responses_websocket::ResponsesWebsocketConnection; +pub use crate::endpoint::CompactClient; +pub use crate::endpoint::MemoriesClient; +pub use crate::endpoint::ModelsClient; +pub use crate::endpoint::RealtimeEventParser; +pub use crate::endpoint::RealtimeSessionConfig; +pub use crate::endpoint::RealtimeSessionMode; +pub use crate::endpoint::RealtimeWebsocketClient; +pub use crate::endpoint::RealtimeWebsocketConnection; +pub use crate::endpoint::RealtimeWebsocketEvents; +pub use crate::endpoint::RealtimeWebsocketWriter; +pub use crate::endpoint::ResponsesClient; +pub use crate::endpoint::ResponsesOptions; +pub use crate::endpoint::ResponsesWebsocketClient; +pub use crate::endpoint::ResponsesWebsocketConnection; pub use crate::error::ApiError; pub use crate::provider::Provider; +pub use crate::provider::RetryConfig; pub use crate::provider::is_azure_responses_wire_base_url; +pub use crate::requests::Compression; pub use crate::sse::stream_from_fixture; pub use crate::telemetry::SseTelemetry; pub use crate::telemetry::WebsocketTelemetry; diff --git a/codex-rs/codex-api/src/requests/mod.rs 
b/codex-rs/codex-api/src/requests/mod.rs index 4e98bb8cac..1c357b2a61 100644 --- a/codex-rs/codex-api/src/requests/mod.rs +++ b/codex-rs/codex-api/src/requests/mod.rs @@ -1,2 +1,5 @@ pub(crate) mod headers; -pub mod responses; +pub(crate) mod responses; + +pub use responses::Compression; +pub(crate) use responses::attach_item_ids; diff --git a/codex-rs/codex-api/src/sse/mod.rs b/codex-rs/codex-api/src/sse/mod.rs index cb689afc1d..06b9855890 100644 --- a/codex-rs/codex-api/src/sse/mod.rs +++ b/codex-rs/codex-api/src/sse/mod.rs @@ -1,5 +1,6 @@ -pub mod responses; +pub(crate) mod responses; -pub use responses::process_sse; +pub(crate) use responses::ResponsesStreamEvent; +pub(crate) use responses::process_responses_event; pub use responses::spawn_response_stream; pub use responses::stream_from_fixture; diff --git a/codex-rs/codex-api/tests/clients.rs b/codex-rs/codex-api/tests/clients.rs index ba0f416788..2e8411dd81 100644 --- a/codex-rs/codex-api/tests/clients.rs +++ b/codex-rs/codex-api/tests/clients.rs @@ -6,11 +6,11 @@ use anyhow::Result; use async_trait::async_trait; use bytes::Bytes; use codex_api::AuthProvider; +use codex_api::Compression; use codex_api::Provider; use codex_api::ResponsesApiRequest; use codex_api::ResponsesClient; use codex_api::ResponsesOptions; -use codex_api::requests::responses::Compression; use codex_client::HttpTransport; use codex_client::Request; use codex_client::Response; @@ -126,7 +126,7 @@ fn provider(name: &str) -> Provider { base_url: "https://example.com/v1".to_string(), query_params: None, headers: HeaderMap::new(), - retry: codex_api::provider::RetryConfig { + retry: codex_api::RetryConfig { max_attempts: 1, base_delay: Duration::from_millis(1), retry_429: false, @@ -266,7 +266,7 @@ async fn streaming_client_retries_on_transport_error() -> Result<()> { let request = ResponsesApiRequest { model: "gpt-test".into(), - instructions: "Say hi".into(), + instructions: Some("Say hi".into()), input: Vec::new(), tools: Vec::new(), 
tool_choice: "auto".into(), @@ -278,6 +278,7 @@ async fn streaming_client_retries_on_transport_error() -> Result<()> { service_tier: None, prompt_cache_key: None, text: None, + client_metadata: None, }; let client = ResponsesClient::new(transport.clone(), provider, NoAuth); @@ -302,7 +303,7 @@ async fn azure_default_store_attaches_ids_and_headers() -> Result<()> { let request = ResponsesApiRequest { model: "gpt-test".into(), - instructions: "Say hi".into(), + instructions: Some("Say hi".into()), input: vec![ResponseItem::Message { id: Some("msg_1".into()), role: "user".into(), @@ -320,6 +321,7 @@ async fn azure_default_store_attaches_ids_and_headers() -> Result<()> { service_tier: None, prompt_cache_key: None, text: None, + client_metadata: None, }; let mut extra_headers = HeaderMap::new(); diff --git a/codex-rs/codex-api/tests/models_integration.rs b/codex-rs/codex-api/tests/models_integration.rs index 3ffb2496b4..c6b28fa71a 100644 --- a/codex-rs/codex-api/tests/models_integration.rs +++ b/codex-rs/codex-api/tests/models_integration.rs @@ -1,7 +1,7 @@ use codex_api::AuthProvider; use codex_api::ModelsClient; -use codex_api::provider::Provider; -use codex_api::provider::RetryConfig; +use codex_api::Provider; +use codex_api::RetryConfig; use codex_client::ReqwestTransport; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::openai_models::ConfigShellToolType; diff --git a/codex-rs/codex-api/tests/realtime_websocket_e2e.rs b/codex-rs/codex-api/tests/realtime_websocket_e2e.rs index 130ab6fd35..42c7a92e2d 100644 --- a/codex-rs/codex-api/tests/realtime_websocket_e2e.rs +++ b/codex-rs/codex-api/tests/realtime_websocket_e2e.rs @@ -2,14 +2,14 @@ use std::collections::HashMap; use std::future::Future; use std::time::Duration; +use codex_api::Provider; use codex_api::RealtimeAudioFrame; use codex_api::RealtimeEvent; use codex_api::RealtimeEventParser; use codex_api::RealtimeSessionConfig; use codex_api::RealtimeSessionMode; use 
codex_api::RealtimeWebsocketClient; -use codex_api::provider::Provider; -use codex_api::provider::RetryConfig; +use codex_api::RetryConfig; use codex_protocol::protocol::RealtimeHandoffRequested; use futures::SinkExt; use futures::StreamExt; diff --git a/codex-rs/codex-api/tests/sse_end_to_end.rs b/codex-rs/codex-api/tests/sse_end_to_end.rs index 80972340dd..b15de296a8 100644 --- a/codex-rs/codex-api/tests/sse_end_to_end.rs +++ b/codex-rs/codex-api/tests/sse_end_to_end.rs @@ -4,10 +4,10 @@ use anyhow::Result; use async_trait::async_trait; use bytes::Bytes; use codex_api::AuthProvider; +use codex_api::Compression; use codex_api::Provider; use codex_api::ResponseEvent; use codex_api::ResponsesClient; -use codex_api::requests::responses::Compression; use codex_client::HttpTransport; use codex_client::Request; use codex_client::Response; @@ -64,7 +64,7 @@ fn provider(name: &str) -> Provider { base_url: "https://example.com/v1".to_string(), query_params: None, headers: HeaderMap::new(), - retry: codex_api::provider::RetryConfig { + retry: codex_api::RetryConfig { max_attempts: 1, base_delay: Duration::from_millis(1), retry_429: false, diff --git a/codex-rs/codex-backend-openapi-models/src/models/mod.rs b/codex-rs/codex-backend-openapi-models/src/models/mod.rs index 90bc058334..2140c83f91 100644 --- a/codex-rs/codex-backend-openapi-models/src/models/mod.rs +++ b/codex-rs/codex-backend-openapi-models/src/models/mod.rs @@ -4,41 +4,41 @@ // The process for this will change // Config -pub mod config_file_response; +pub(crate) mod config_file_response; pub use self::config_file_response::ConfigFileResponse; // Cloud Tasks -pub mod code_task_details_response; +pub(crate) mod code_task_details_response; pub use self::code_task_details_response::CodeTaskDetailsResponse; -pub mod task_response; +pub(crate) mod task_response; pub use self::task_response::TaskResponse; -pub mod external_pull_request_response; +pub(crate) mod external_pull_request_response; pub use 
self::external_pull_request_response::ExternalPullRequestResponse; -pub mod git_pull_request; +pub(crate) mod git_pull_request; pub use self::git_pull_request::GitPullRequest; -pub mod task_list_item; +pub(crate) mod task_list_item; pub use self::task_list_item::TaskListItem; -pub mod paginated_list_task_list_item_; +pub(crate) mod paginated_list_task_list_item_; pub use self::paginated_list_task_list_item_::PaginatedListTaskListItem; // Rate Limits -pub mod additional_rate_limit_details; +pub(crate) mod additional_rate_limit_details; pub use self::additional_rate_limit_details::AdditionalRateLimitDetails; -pub mod rate_limit_status_payload; +pub(crate) mod rate_limit_status_payload; pub use self::rate_limit_status_payload::PlanType; pub use self::rate_limit_status_payload::RateLimitStatusPayload; -pub mod rate_limit_status_details; +pub(crate) mod rate_limit_status_details; pub use self::rate_limit_status_details::RateLimitStatusDetails; -pub mod rate_limit_window_snapshot; +pub(crate) mod rate_limit_window_snapshot; pub use self::rate_limit_window_snapshot::RateLimitWindowSnapshot; -pub mod credit_status_details; +pub(crate) mod credit_status_details; pub use self::credit_status_details::CreditStatusDetails; diff --git a/codex-rs/codex-mcp/src/lib.rs b/codex-rs/codex-mcp/src/lib.rs index 149578c967..48969841c4 100644 --- a/codex-rs/codex-mcp/src/lib.rs +++ b/codex-rs/codex-mcp/src/lib.rs @@ -1,2 +1,40 @@ -pub mod mcp; -pub mod mcp_connection_manager; +pub(crate) mod mcp; +pub(crate) mod mcp_connection_manager; + +pub use mcp::CODEX_APPS_MCP_SERVER_NAME; +pub use mcp::McpAuthStatusEntry; +pub use mcp::McpConfig; +pub use mcp::McpManager; +pub use mcp::McpOAuthLoginConfig; +pub use mcp::McpOAuthLoginSupport; +pub use mcp::McpOAuthScopesSource; +pub use mcp::McpSnapshotDetail; +pub use mcp::ResolvedMcpOAuthScopes; +pub use mcp::ToolPluginProvenance; +pub use mcp::canonical_mcp_server_key; +pub use mcp::collect_mcp_snapshot; +pub use 
mcp::collect_mcp_snapshot_from_manager; +pub use mcp::collect_mcp_snapshot_from_manager_with_detail; +pub use mcp::collect_mcp_snapshot_with_detail; +pub use mcp::collect_missing_mcp_dependencies; +pub use mcp::compute_auth_statuses; +pub use mcp::configured_mcp_servers; +pub use mcp::discover_supported_scopes; +pub use mcp::effective_mcp_servers; +pub use mcp::group_tools_by_server; +pub use mcp::oauth_login_support; +pub use mcp::qualified_mcp_tool_name_prefix; +pub use mcp::resolve_oauth_scopes; +pub use mcp::should_retry_without_scopes; +pub use mcp::split_qualified_tool_name; +pub use mcp::tool_plugin_provenance; +pub use mcp::with_codex_apps_mcp; +pub use mcp_connection_manager::CodexAppsToolsCacheKey; +pub use mcp_connection_manager::DEFAULT_STARTUP_TIMEOUT; +pub use mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY; +pub use mcp_connection_manager::MCP_SANDBOX_STATE_METHOD; +pub use mcp_connection_manager::McpConnectionManager; +pub use mcp_connection_manager::SandboxState; +pub use mcp_connection_manager::ToolInfo; +pub use mcp_connection_manager::codex_apps_tools_cache_key; +pub use mcp_connection_manager::filter_non_codex_apps_mcp_tools_only; diff --git a/codex-rs/codex-mcp/src/mcp/auth.rs b/codex-rs/codex-mcp/src/mcp/auth.rs index a4e14e42f9..01dc5e9b46 100644 --- a/codex-rs/codex-mcp/src/mcp/auth.rs +++ b/codex-rs/codex-mcp/src/mcp/auth.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use anyhow::Result; +use codex_config::types::OAuthCredentialsStoreMode; use codex_protocol::protocol::McpAuthStatus; -use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rmcp_client::OAuthProviderError; use codex_rmcp_client::determine_streamable_http_auth_status; use codex_rmcp_client::discover_streamable_http_oauth; diff --git a/codex-rs/codex-mcp/src/mcp/mod.rs b/codex-rs/codex-mcp/src/mcp/mod.rs index b843bbadc0..9301ff1008 100644 --- a/codex-rs/codex-mcp/src/mcp/mod.rs +++ b/codex-rs/codex-mcp/src/mcp/mod.rs @@ -1,5 +1,15 @@ -pub mod auth; +pub(crate) 
mod auth; mod skill_dependencies; +pub use auth::McpAuthStatusEntry; +pub use auth::McpOAuthLoginConfig; +pub use auth::McpOAuthLoginSupport; +pub use auth::McpOAuthScopesSource; +pub use auth::ResolvedMcpOAuthScopes; +pub use auth::compute_auth_statuses; +pub use auth::discover_supported_scopes; +pub use auth::oauth_login_support; +pub use auth::resolve_oauth_scopes; +pub use auth::should_retry_without_scopes; pub use skill_dependencies::canonical_mcp_server_key; pub use skill_dependencies::collect_missing_mcp_dependencies; @@ -12,6 +22,7 @@ use async_channel::unbounded; use codex_config::Constrained; use codex_config::McpServerConfig; use codex_config::McpServerTransportConfig; +use codex_config::types::OAuthCredentialsStoreMode; use codex_login::CodexAuth; use codex_plugin::PluginCapabilitySummary; use codex_protocol::mcp::Resource; @@ -20,10 +31,8 @@ use codex_protocol::mcp::Tool; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::McpListToolsResponseEvent; use codex_protocol::protocol::SandboxPolicy; -use codex_rmcp_client::OAuthCredentialsStoreMode; use serde_json::Value; -use crate::mcp::auth::compute_auth_statuses; use crate::mcp_connection_manager::McpConnectionManager; use crate::mcp_connection_manager::SandboxState; use crate::mcp_connection_manager::codex_apps_tools_cache_key; @@ -34,6 +43,19 @@ const MCP_TOOL_NAME_DELIMITER: &str = "__"; pub const CODEX_APPS_MCP_SERVER_NAME: &str = "codex_apps"; const CODEX_CONNECTORS_TOKEN_ENV_VAR: &str = "CODEX_CONNECTORS_TOKEN"; +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub enum McpSnapshotDetail { + #[default] + Full, + ToolsAndAuthOnly, +} + +impl McpSnapshotDetail { + fn include_resources(self) -> bool { + matches!(self, Self::Full) + } +} + /// The Responses API requires tool names to match `^[a-zA-Z0-9_-]+$`. /// MCP server/tool names are user-controlled, so sanitize the fully-qualified /// name we expose to the model by replacing any disallowed character with `_`. 
@@ -283,6 +305,15 @@ pub async fn collect_mcp_snapshot( config: &McpConfig, auth: Option<&CodexAuth>, submit_id: String, +) -> McpListToolsResponseEvent { + collect_mcp_snapshot_with_detail(config, auth, submit_id, McpSnapshotDetail::Full).await +} + +pub async fn collect_mcp_snapshot_with_detail( + config: &McpConfig, + auth: Option<&CodexAuth>, + submit_id: String, + detail: McpSnapshotDetail, ) -> McpListToolsResponseEvent { let mcp_servers = effective_mcp_servers(config, auth); let tool_plugin_provenance = tool_plugin_provenance(config); @@ -323,8 +354,12 @@ pub async fn collect_mcp_snapshot( ) .await; - let snapshot = - collect_mcp_snapshot_from_manager(&mcp_connection_manager, auth_status_entries).await; + let snapshot = collect_mcp_snapshot_from_manager_with_detail( + &mcp_connection_manager, + auth_status_entries, + detail, + ) + .await; cancel_token.cancel(); @@ -362,12 +397,37 @@ pub fn group_tools_by_server( pub async fn collect_mcp_snapshot_from_manager( mcp_connection_manager: &McpConnectionManager, - auth_status_entries: HashMap, + auth_status_entries: HashMap, +) -> McpListToolsResponseEvent { + collect_mcp_snapshot_from_manager_with_detail( + mcp_connection_manager, + auth_status_entries, + McpSnapshotDetail::Full, + ) + .await +} + +pub async fn collect_mcp_snapshot_from_manager_with_detail( + mcp_connection_manager: &McpConnectionManager, + auth_status_entries: HashMap, + detail: McpSnapshotDetail, ) -> McpListToolsResponseEvent { let (tools, resources, resource_templates) = tokio::join!( mcp_connection_manager.list_all_tools(), - mcp_connection_manager.list_all_resources(), - mcp_connection_manager.list_all_resource_templates(), + async { + if detail.include_resources() { + mcp_connection_manager.list_all_resources().await + } else { + HashMap::new() + } + }, + async { + if detail.include_resources() { + mcp_connection_manager.list_all_resource_templates().await + } else { + HashMap::new() + } + }, ); let auth_statuses = auth_status_entries diff 
--git a/codex-rs/codex-mcp/src/mcp_connection_manager.rs b/codex-rs/codex-mcp/src/mcp_connection_manager.rs index 9a995a5c76..c90088c7dc 100644 --- a/codex-rs/codex-mcp/src/mcp_connection_manager.rs +++ b/codex-rs/codex-mcp/src/mcp_connection_manager.rs @@ -19,10 +19,10 @@ use std::sync::atomic::Ordering; use std::time::Duration; use std::time::Instant; +use crate::McpAuthStatusEntry; use crate::mcp::CODEX_APPS_MCP_SERVER_NAME; use crate::mcp::McpConfig; use crate::mcp::ToolPluginProvenance; -use crate::mcp::auth::McpAuthStatusEntry; use crate::mcp::configured_mcp_servers; use crate::mcp::effective_mcp_servers; use crate::mcp::sanitize_responses_api_tool_name; @@ -34,6 +34,7 @@ use async_channel::Sender; use codex_async_utils::CancelErr; use codex_async_utils::OrCancelExt; use codex_config::Constrained; +use codex_config::types::OAuthCredentialsStoreMode; use codex_protocol::approvals::ElicitationRequest; use codex_protocol::approvals::ElicitationRequestEvent; use codex_protocol::mcp::CallToolResult; @@ -47,7 +48,6 @@ use codex_protocol::protocol::McpStartupStatus; use codex_protocol::protocol::McpStartupUpdateEvent; use codex_protocol::protocol::SandboxPolicy; use codex_rmcp_client::ElicitationResponse; -use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rmcp_client::RmcpClient; use codex_rmcp_client::SendElicitation; use futures::future::BoxFuture; @@ -1568,7 +1568,7 @@ fn filter_disallowed_codex_apps_tools(tools: Vec) -> Vec { } fn emit_duration(metric: &str, duration: Duration, tags: &[(&str, &str)]) { - if let Some(metrics) = codex_otel::metrics::global() { + if let Some(metrics) = codex_otel::global() { let _ = metrics.record_duration(metric, duration, tags); } } diff --git a/codex-rs/config/Cargo.toml b/codex-rs/config/Cargo.toml index 0b440263eb..9532d74d00 100644 --- a/codex-rs/config/Cargo.toml +++ b/codex-rs/config/Cargo.toml @@ -8,10 +8,16 @@ license.workspace = true workspace = true [dependencies] +anyhow = { workspace = true } 
codex-app-server-protocol = { workspace = true } codex-execpolicy = { workspace = true } +codex-features = { workspace = true } +codex-git-utils = { workspace = true } +codex-model-provider-info = { workspace = true } +codex-network-proxy = { workspace = true } codex-protocol = { workspace = true } codex-utils-absolute-path = { workspace = true } +dunce = { workspace = true } futures = { workspace = true, features = ["alloc", "std"] } multimap = { workspace = true } schemars = { workspace = true } @@ -27,7 +33,6 @@ tracing = { workspace = true } wildmatch = { workspace = true } [dev-dependencies] -anyhow = { workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/codex-rs/config/src/config_requirements.rs b/codex-rs/config/src/config_requirements.rs index 7f6b5bc1e5..eb1c1033a5 100644 --- a/codex-rs/config/src/config_requirements.rs +++ b/codex-rs/config/src/config_requirements.rs @@ -237,6 +237,8 @@ pub struct NetworkRequirementsToml { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. pub managed_allowed_domains_only: Option, + /// In danger-full-access mode, allow all network access and enforce managed deny entries. + pub danger_full_access_denylist_only: Option, pub unix_sockets: Option, pub allow_local_binding: Option, } @@ -255,6 +257,8 @@ struct RawNetworkRequirementsToml { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. managed_allowed_domains_only: Option, + /// In danger-full-access mode, allow all network access and enforce managed deny entries. 
+ danger_full_access_denylist_only: Option, #[serde(default)] denied_domains: Option>, unix_sockets: Option, @@ -279,6 +283,7 @@ impl<'de> Deserialize<'de> for NetworkRequirementsToml { domains, allowed_domains, managed_allowed_domains_only, + danger_full_access_denylist_only, denied_domains, unix_sockets, allow_unix_sockets, @@ -307,6 +312,7 @@ impl<'de> Deserialize<'de> for NetworkRequirementsToml { domains: domains .or_else(|| legacy_domain_permissions_from_lists(allowed_domains, denied_domains)), managed_allowed_domains_only, + danger_full_access_denylist_only, unix_sockets: unix_sockets .or_else(|| legacy_unix_socket_permissions_from_list(allow_unix_sockets)), allow_local_binding, @@ -359,6 +365,8 @@ pub struct NetworkConstraints { /// When true, only managed `allowed_domains` are respected while managed /// network enforcement is active. User allowlist entries are ignored. pub managed_allowed_domains_only: Option, + /// In danger-full-access mode, allow all network access and enforce managed deny entries. 
+ pub danger_full_access_denylist_only: Option, pub unix_sockets: Option, pub allow_local_binding: Option, } @@ -384,6 +392,7 @@ impl From for NetworkConstraints { dangerously_allow_all_unix_sockets, domains, managed_allowed_domains_only, + danger_full_access_denylist_only, unix_sockets, allow_local_binding, } = value; @@ -396,6 +405,7 @@ impl From for NetworkConstraints { dangerously_allow_all_unix_sockets, domains, managed_allowed_domains_only, + danger_full_access_denylist_only, unix_sockets, allow_local_binding, } @@ -1808,6 +1818,7 @@ allowed_approvals_reviewers = ["user"] allow_upstream_proxy = false dangerously_allow_all_unix_sockets = true managed_allowed_domains_only = true + danger_full_access_denylist_only = true allow_local_binding = false [experimental_network.domains] @@ -1858,6 +1869,10 @@ allowed_approvals_reviewers = ["user"] sourced_network.value.managed_allowed_domains_only, Some(true) ); + assert_eq!( + sourced_network.value.danger_full_access_denylist_only, + Some(true) + ); assert_eq!( sourced_network.value.unix_sockets.as_ref(), Some(&NetworkUnixSocketPermissionsToml { @@ -1881,6 +1896,7 @@ allowed_approvals_reviewers = ["user"] dangerously_allow_all_unix_sockets = true allowed_domains = ["api.example.com", "*.openai.com"] managed_allowed_domains_only = true + danger_full_access_denylist_only = true denied_domains = ["blocked.example.com"] allow_unix_sockets = ["/tmp/example.sock"] allow_local_binding = false @@ -1925,6 +1941,10 @@ allowed_approvals_reviewers = ["user"] sourced_network.value.managed_allowed_domains_only, Some(true) ); + assert_eq!( + sourced_network.value.danger_full_access_denylist_only, + Some(true) + ); assert_eq!( sourced_network.value.unix_sockets.as_ref(), Some(&NetworkUnixSocketPermissionsToml { diff --git a/codex-rs/config/src/config_toml.rs b/codex-rs/config/src/config_toml.rs new file mode 100644 index 0000000000..caf9d25b88 --- /dev/null +++ b/codex-rs/config/src/config_toml.rs @@ -0,0 +1,778 @@ +//! 
Schema-heavy configuration TOML types used by Codex. + +use std::collections::BTreeMap; +use std::collections::HashMap; +use std::path::Path; + +use crate::permissions_toml::PermissionsToml; +use crate::profile_toml::ConfigProfile; +use crate::types::AnalyticsConfigToml; +use crate::types::ApprovalsReviewer; +use crate::types::AppsConfigToml; +use crate::types::AuthCredentialsStoreMode; +use crate::types::FeedbackConfigToml; +use crate::types::History; +use crate::types::McpServerConfig; +use crate::types::MemoriesToml; +use crate::types::Notice; +use crate::types::OAuthCredentialsStoreMode; +use crate::types::OtelConfigToml; +use crate::types::PluginConfig; +use crate::types::SandboxWorkspaceWrite; +use crate::types::ShellEnvironmentPolicyToml; +use crate::types::SkillsConfig; +use crate::types::ToolSuggestConfig; +use crate::types::Tui; +use crate::types::UriBasedFileOpener; +use crate::types::WindowsToml; +use codex_app_server_protocol::Tools; +use codex_app_server_protocol::UserSavedConfig; +use codex_features::FeaturesToml; +use codex_git_utils::resolve_root_git_project_for_trust; +use codex_model_provider_info::LEGACY_OLLAMA_CHAT_PROVIDER_ID; +use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; +use codex_model_provider_info::ModelProviderInfo; +use codex_model_provider_info::OLLAMA_CHAT_PROVIDER_REMOVED_ERROR; +use codex_model_provider_info::OLLAMA_OSS_PROVIDER_ID; +use codex_model_provider_info::OPENAI_PROVIDER_ID; +use codex_protocol::config_types::ForcedLoginMethod; +use codex_protocol::config_types::Personality; +use codex_protocol::config_types::ReasoningSummary; +use codex_protocol::config_types::SandboxMode; +use codex_protocol::config_types::ServiceTier; +use codex_protocol::config_types::TrustLevel; +use codex_protocol::config_types::Verbosity; +use codex_protocol::config_types::WebSearchMode; +use codex_protocol::config_types::WebSearchToolConfig; +use codex_protocol::config_types::WindowsSandboxLevel; +use 
codex_protocol::openai_models::ReasoningEffort; +use codex_protocol::protocol::AskForApproval; +use codex_protocol::protocol::ReadOnlyAccess; +use codex_protocol::protocol::SandboxPolicy; +use codex_utils_absolute_path::AbsolutePathBuf; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Deserializer; +use serde::Serialize; + +const RESERVED_MODEL_PROVIDER_IDS: [&str; 3] = [ + OPENAI_PROVIDER_ID, + OLLAMA_OSS_PROVIDER_ID, + LMSTUDIO_OSS_PROVIDER_ID, +]; + +/// Base config deserialized from ~/.codex/config.toml. +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct ConfigToml { + /// Optional override of model selection. + pub model: Option, + /// Review model override used by the `/review` feature. + pub review_model: Option, + + /// Provider to use from the model_providers map. + pub model_provider: Option, + + /// Size of the context window for the model, in tokens. + pub model_context_window: Option, + + /// Token usage threshold triggering auto-compaction of conversation history. + pub model_auto_compact_token_limit: Option, + + /// Default approval policy for executing commands. + pub approval_policy: Option, + + /// Configures who approval requests are routed to for review once they have + /// been escalated. This does not disable separate safety checks such as + /// ARC. + pub approvals_reviewer: Option, + + #[serde(default)] + pub shell_environment_policy: ShellEnvironmentPolicyToml, + + /// Whether the model may request a login shell for shell-based tools. + /// Default to `true` + /// + /// If `true`, the model may request a login shell (`login = true`), and + /// omitting `login` defaults to using a login shell. + /// If `false`, the model can never use a login shell: `login = true` + /// requests are rejected, and omitting `login` defaults to a non-login + /// shell. + pub allow_login_shell: Option, + + /// Sandbox mode to use. 
+ pub sandbox_mode: Option, + + /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`. + pub sandbox_workspace_write: Option, + + /// Default named permissions profile to apply from the `[permissions]` + /// table. + pub default_permissions: Option, + + /// Named permissions profiles. + #[serde(default)] + pub permissions: Option, + + /// Optional external command to spawn for end-user notifications. + #[serde(default)] + pub notify: Option>, + + /// System instructions. + pub instructions: Option, + + /// Developer instructions inserted as a `developer` role message. + #[serde(default)] + pub developer_instructions: Option, + + /// Whether to inject the `` developer block. + pub include_permissions_instructions: Option, + + /// Whether to inject the `` developer block. + pub include_apps_instructions: Option, + + /// Whether to inject the `` user block. + pub include_environment_context: Option, + + /// Optional path to a file containing model instructions that will override + /// the built-in instructions for the selected model. Users are STRONGLY + /// DISCOURAGED from using this field, as deviating from the instructions + /// sanctioned by Codex will likely degrade model performance. + pub model_instructions_file: Option, + + /// Compact prompt used for history compaction. + pub compact_prompt: Option, + + /// Optional commit attribution text for commit message co-author trailers. + /// + /// Set to an empty string to disable automatic commit attribution. + pub commit_attribution: Option, + + /// When set, restricts ChatGPT login to a specific workspace identifier. + #[serde(default)] + pub forced_chatgpt_workspace_id: Option, + + /// When set, restricts the login mechanism users may use. + #[serde(default)] + pub forced_login_method: Option, + + /// Preferred backend for storing CLI auth credentials. + /// file (default): Use a file in the Codex home directory. + /// keyring: Use an OS-specific keyring service. 
+ /// auto: Use the keyring if available, otherwise use a file. + #[serde(default)] + pub cli_auth_credentials_store: Option, + + /// Definition for MCP servers that Codex can reach out to for tool calls. + #[serde(default)] + // Uses the raw MCP input shape (custom deserialization) rather than `McpServerConfig`. + #[schemars(schema_with = "crate::schema::mcp_servers_schema")] + pub mcp_servers: HashMap, + + /// Preferred backend for storing MCP OAuth credentials. + /// keyring: Use an OS-specific keyring service. + /// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 + /// file: Use a file in the Codex home directory. + /// auto (default): Use the OS-specific keyring service if available, otherwise use a file. + #[serde(default)] + pub mcp_oauth_credentials_store: Option, + + /// Optional fixed port for the local HTTP callback server used during MCP OAuth login. + /// When unset, Codex will bind to an ephemeral port chosen by the OS. + pub mcp_oauth_callback_port: Option, + + /// Optional redirect URI to use during MCP OAuth login. + /// When set, this URI is used in the OAuth authorization request instead + /// of the local listener address. The local callback listener still binds + /// to 127.0.0.1 (using `mcp_oauth_callback_port` when provided). + pub mcp_oauth_callback_url: Option, + + /// User-defined provider entries that extend the built-in list. Built-in + /// IDs cannot be overridden. + #[serde(default, deserialize_with = "deserialize_model_providers")] + pub model_providers: HashMap, + + /// Maximum number of bytes to include from an AGENTS.md project doc file. + pub project_doc_max_bytes: Option, + + /// Ordered list of fallback filenames to look for when AGENTS.md is missing. + pub project_doc_fallback_filenames: Option>, + + /// Token budget applied when storing tool/function outputs in the context manager. 
+ pub tool_output_token_limit: Option, + + /// Maximum poll window for background terminal output (`write_stdin`), in milliseconds. + /// Default: `300000` (5 minutes). + pub background_terminal_max_timeout: Option, + + /// Optional absolute path to the Node runtime used by `js_repl`. + pub js_repl_node_path: Option, + + /// Ordered list of directories to search for Node modules in `js_repl`. + pub js_repl_node_module_dirs: Option>, + + /// Optional absolute path to patched zsh used by zsh-exec-bridge-backed shell execution. + pub zsh_path: Option, + + /// Profile to use from the `profiles` map. + pub profile: Option, + + /// Named profiles to facilitate switching between different configurations. + #[serde(default)] + pub profiles: HashMap, + + /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. + #[serde(default)] + pub history: Option, + + /// Directory where Codex stores the SQLite state DB. + /// Defaults to `$CODEX_SQLITE_HOME` when set. Otherwise uses `$CODEX_HOME`. + pub sqlite_home: Option, + + /// Directory where Codex writes log files, for example `codex-tui.log`. + /// Defaults to `$CODEX_HOME/log`. + pub log_dir: Option, + + /// Optional URI-based file opener. If set, citations to files in the model + /// output will be hyperlinked using the specified URI scheme. + pub file_opener: Option, + + /// Collection of settings that are specific to the TUI. + pub tui: Option, + + /// When set to `true`, `AgentReasoning` events will be hidden from the + /// UI/output. Defaults to `false`. + pub hide_agent_reasoning: Option, + + /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. + /// Defaults to `false`. + pub show_raw_agent_reasoning: Option, + + pub model_reasoning_effort: Option, + pub plan_mode_reasoning_effort: Option, + pub model_reasoning_summary: Option, + /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). 
+ pub model_verbosity: Option, + + /// Override to force-enable reasoning summaries for the configured model. + pub model_supports_reasoning_summaries: Option, + + /// Optional path to a JSON model catalog (applied on startup only). + /// Per-thread `config` overrides are accepted but do not reapply this (no-ops). + pub model_catalog_json: Option, + + /// Optionally specify a personality for the model + pub personality: Option, + + /// Optional explicit service tier preference for new turns (`fast` or `flex`). + pub service_tier: Option, + + /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). + pub chatgpt_base_url: Option, + + /// Base URL override for the built-in `openai` model provider. + pub openai_base_url: Option, + + /// Machine-local realtime audio device preferences used by realtime voice. + #[serde(default)] + pub audio: Option, + + /// Experimental / do not use. Overrides only the realtime conversation + /// websocket transport base URL (the `Op::RealtimeConversation` + /// `/v1/realtime` + /// connection) without changing normal provider HTTP requests. + pub experimental_realtime_ws_base_url: Option, + /// Experimental / do not use. Selects the realtime websocket model/snapshot + /// used for the `Op::RealtimeConversation` connection. + pub experimental_realtime_ws_model: Option, + /// Experimental / do not use. Realtime websocket session selection. + /// `version` controls v1/v2 and `type` controls conversational/transcription. + #[serde(default)] + pub realtime: Option, + /// Experimental / do not use. Overrides only the realtime conversation + /// websocket transport instructions (the `Op::RealtimeConversation` + /// `/ws` session.update instructions) without changing normal prompts. + pub experimental_realtime_ws_backend_prompt: Option, + /// Experimental / do not use. Replaces the synthesized realtime startup + /// context appended to websocket session instructions. 
An empty string + /// disables startup context injection entirely. + pub experimental_realtime_ws_startup_context: Option, + /// Experimental / do not use. Replaces the built-in realtime start + /// instructions inserted into developer messages when realtime becomes + /// active. + pub experimental_realtime_start_instructions: Option, + pub projects: Option>, + + /// Controls the web search tool mode: disabled, cached, or live. + pub web_search: Option, + + /// Nested tools section for feature toggles + pub tools: Option, + + /// Additional discoverable tools that can be suggested for installation. + pub tool_suggest: Option, + + /// Agent-related settings (thread limits, etc.). + pub agents: Option, + + /// Memories subsystem settings. + pub memories: Option, + + /// User-level skill config entries keyed by SKILL.md path. + pub skills: Option, + + /// User-level plugin config entries keyed by plugin name. + #[serde(default)] + pub plugins: HashMap, + + /// Centralized feature flags (new). Prefer this over individual toggles. + #[serde(default)] + // Injects known feature keys into the schema and forbids unknown keys. + #[schemars(schema_with = "crate::schema::features_schema")] + pub features: Option, + + /// Suppress warnings about unstable (under development) features. + pub suppress_unstable_features_warning: Option, + + /// Settings for ghost snapshots (used for undo). + #[serde(default)] + pub ghost_snapshot: Option, + + /// Markers used to detect the project root when searching parent + /// directories for `.codex` folders. Defaults to [".git"] when unset. + #[serde(default)] + pub project_root_markers: Option>, + + /// When `true`, checks for Codex updates on startup and surfaces update prompts. + /// Set to `false` only if your Codex updates are centrally managed. + /// Defaults to `true`. + pub check_for_update_on_startup: Option, + + /// When true, disables burst-paste detection for typed input entirely. 
+ /// All characters are inserted as they are received, and no buffering + /// or placeholder replacement will occur for fast keypress bursts. + pub disable_paste_burst: Option, + + /// When `false`, disables analytics across Codex product surfaces in this machine. + /// Defaults to `true`. + pub analytics: Option, + + /// When `false`, disables feedback collection across Codex product surfaces. + /// Defaults to `true`. + pub feedback: Option, + + /// Settings for app-specific controls. + #[serde(default)] + pub apps: Option, + + /// OTEL configuration. + pub otel: Option, + + /// Windows-specific configuration. + #[serde(default)] + pub windows: Option, + + /// Tracks whether the Windows onboarding screen has been acknowledged. + pub windows_wsl_setup_acknowledged: Option, + + /// Collection of in-product notices (different from notifications) + /// See [`crate::types::Notice`] for more details + pub notice: Option, + + /// Legacy, now use features + /// Deprecated: ignored. Use `model_instructions_file`. + #[schemars(skip)] + pub experimental_instructions_file: Option, + pub experimental_compact_prompt_file: Option, + pub experimental_use_unified_exec_tool: Option, + pub experimental_use_freeform_apply_patch: Option, + /// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama". 
+ pub oss_provider: Option, +} + +impl From for UserSavedConfig { + fn from(config_toml: ConfigToml) -> Self { + let profiles = config_toml + .profiles + .into_iter() + .map(|(k, v)| (k, v.into())) + .collect(); + + Self { + approval_policy: config_toml.approval_policy, + sandbox_mode: config_toml.sandbox_mode, + sandbox_settings: config_toml.sandbox_workspace_write.map(From::from), + forced_chatgpt_workspace_id: config_toml.forced_chatgpt_workspace_id, + forced_login_method: config_toml.forced_login_method, + model: config_toml.model, + model_reasoning_effort: config_toml.model_reasoning_effort, + model_reasoning_summary: config_toml.model_reasoning_summary, + model_verbosity: config_toml.model_verbosity, + tools: config_toml.tools.map(From::from), + profile: config_toml.profile, + profiles, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct ProjectConfig { + pub trust_level: Option, +} + +impl ProjectConfig { + pub fn is_trusted(&self) -> bool { + matches!(self.trust_level, Some(TrustLevel::Trusted)) + } + + pub fn is_untrusted(&self) -> bool { + matches!(self.trust_level, Some(TrustLevel::Untrusted)) + } +} + +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct RealtimeAudioConfig { + pub microphone: Option, + pub speaker: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum RealtimeWsMode { + #[default] + Conversational, + Transcription, +} + +pub use codex_protocol::protocol::RealtimeConversationVersion as RealtimeWsVersion; + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct RealtimeConfig { + pub version: RealtimeWsVersion, + #[serde(rename = "type")] + pub session_type: RealtimeWsMode, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] 
+#[schemars(deny_unknown_fields)] +pub struct RealtimeToml { + pub version: Option, + #[serde(rename = "type")] + pub session_type: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct RealtimeAudioToml { + pub microphone: Option, + pub speaker: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct ToolsToml { + #[serde( + default, + deserialize_with = "deserialize_optional_web_search_tool_config" + )] + pub web_search: Option, + + /// Enable the `view_image` tool that lets the agent attach local images. + #[serde(default)] + pub view_image: Option, +} + +#[derive(Deserialize)] +#[serde(untagged)] +enum WebSearchToolConfigInput { + Enabled(bool), + Config(WebSearchToolConfig), +} + +fn deserialize_optional_web_search_tool_config<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let value = Option::::deserialize(deserializer)?; + + Ok(match value { + None => None, + Some(WebSearchToolConfigInput::Enabled(enabled)) => { + let _ = enabled; + None + } + Some(WebSearchToolConfigInput::Config(config)) => Some(config), + }) +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct AgentsToml { + /// Maximum number of agent threads that can be open concurrently. + /// When unset, no limit is enforced. + #[schemars(range(min = 1))] + pub max_threads: Option, + /// Maximum nesting depth allowed for spawned agent threads. + /// Root sessions start at depth 0. + #[schemars(range(min = 1))] + pub max_depth: Option, + /// Default maximum runtime in seconds for agent job workers. + #[schemars(range(min = 1))] + pub job_max_runtime_seconds: Option, + + /// User-defined role declarations keyed by role name. 
+ /// + /// Example: + /// ```toml + /// [agents.researcher] + /// description = "Research-focused role." + /// config_file = "./agents/researcher.toml" + /// nickname_candidates = ["Herodotus", "Ibn Battuta"] + /// ``` + #[serde(default, flatten)] + pub roles: BTreeMap, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct AgentRoleToml { + /// Human-facing role documentation used in spawn tool guidance. + /// Required unless supplied by the referenced agent role file. + pub description: Option, + + /// Path to a role-specific config layer. + /// Relative paths are resolved relative to the `config.toml` that defines them. + pub config_file: Option, + + /// Candidate nicknames for agents spawned with this role. + pub nickname_candidates: Option>, +} + +impl From for Tools { + fn from(tools_toml: ToolsToml) -> Self { + Self { + web_search: tools_toml.web_search.is_some().then_some(true), + view_image: tools_toml.view_image, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct GhostSnapshotToml { + /// Exclude untracked files larger than this many bytes from ghost snapshots. + #[serde(alias = "ignore_untracked_files_over_bytes")] + pub ignore_large_untracked_files: Option, + /// Ignore untracked directories that contain this many files or more. + /// (Still emits a warning unless warnings are disabled.) + #[serde(alias = "large_untracked_dir_warning_threshold")] + pub ignore_large_untracked_dirs: Option, + /// Disable all ghost snapshot warning events. + pub disable_warnings: Option, +} + +impl ConfigToml { + /// Derive the effective sandbox policy from the configuration. 
+ pub fn derive_sandbox_policy( + &self, + sandbox_mode_override: Option, + profile_sandbox_mode: Option, + windows_sandbox_level: WindowsSandboxLevel, + resolved_cwd: &Path, + sandbox_policy_constraint: Option<&crate::Constrained>, + ) -> SandboxPolicy { + let sandbox_mode_was_explicit = sandbox_mode_override.is_some() + || profile_sandbox_mode.is_some() + || self.sandbox_mode.is_some(); + let resolved_sandbox_mode = sandbox_mode_override + .or(profile_sandbox_mode) + .or(self.sandbox_mode) + .or_else(|| { + // If no sandbox_mode is set but this directory has a trust decision, + // default to workspace-write except on unsandboxed Windows where we + // default to read-only. + self.get_active_project(resolved_cwd).and_then(|p| { + if p.is_trusted() || p.is_untrusted() { + if cfg!(target_os = "windows") + && windows_sandbox_level == WindowsSandboxLevel::Disabled + { + Some(SandboxMode::ReadOnly) + } else { + Some(SandboxMode::WorkspaceWrite) + } + } else { + None + } + }) + }) + .unwrap_or_default(); + let mut sandbox_policy = match resolved_sandbox_mode { + SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(), + SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() { + Some(SandboxWorkspaceWrite { + writable_roots, + network_access, + exclude_tmpdir_env_var, + exclude_slash_tmp, + }) => SandboxPolicy::WorkspaceWrite { + writable_roots: writable_roots.clone(), + read_only_access: ReadOnlyAccess::FullAccess, + network_access: *network_access, + exclude_tmpdir_env_var: *exclude_tmpdir_env_var, + exclude_slash_tmp: *exclude_slash_tmp, + }, + None => SandboxPolicy::new_workspace_write_policy(), + }, + SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess, + }; + let downgrade_workspace_write_if_unsupported = |policy: &mut SandboxPolicy| { + if cfg!(target_os = "windows") + // If the experimental Windows sandbox is enabled, do not force a downgrade. 
+ && windows_sandbox_level == WindowsSandboxLevel::Disabled + && matches!(&*policy, SandboxPolicy::WorkspaceWrite { .. }) + { + *policy = SandboxPolicy::new_read_only_policy(); + } + }; + if matches!(resolved_sandbox_mode, SandboxMode::WorkspaceWrite) { + downgrade_workspace_write_if_unsupported(&mut sandbox_policy); + } + if !sandbox_mode_was_explicit + && let Some(constraint) = sandbox_policy_constraint + && let Err(err) = constraint.can_set(&sandbox_policy) + { + tracing::warn!( + error = %err, + "default sandbox policy is disallowed by requirements; falling back to required default" + ); + sandbox_policy = constraint.get().clone(); + downgrade_workspace_write_if_unsupported(&mut sandbox_policy); + } + sandbox_policy + } + + /// Resolves the cwd to an existing project, or returns None if ConfigToml + /// does not contain a project corresponding to cwd or a git repo for cwd + pub fn get_active_project(&self, resolved_cwd: &Path) -> Option { + let projects = self.projects.clone().unwrap_or_default(); + + let resolved_cwd_key = project_trust_key(resolved_cwd); + let resolved_cwd_raw_key = resolved_cwd.to_string_lossy().to_string(); + if let Some(project_config) = projects + .get(&resolved_cwd_key) + .or_else(|| projects.get(&resolved_cwd_raw_key)) + { + return Some(project_config.clone()); + } + + // If cwd lives inside a git repo/worktree, check whether the root git project + // (the primary repository working directory) is trusted. This lets + // worktrees inherit trust from the main project. 
+ if let Some(repo_root) = resolve_root_git_project_for_trust(resolved_cwd) { + let repo_root_key = project_trust_key(repo_root.as_path()); + let repo_root_raw_key = repo_root.to_string_lossy().to_string(); + if let Some(project_config_for_root) = projects + .get(&repo_root_key) + .or_else(|| projects.get(&repo_root_raw_key)) + { + return Some(project_config_for_root.clone()); + } + } + + None + } + + pub fn get_config_profile( + &self, + override_profile: Option, + ) -> Result { + let profile = override_profile.or_else(|| self.profile.clone()); + + match profile { + Some(key) => { + if let Some(profile) = self.profiles.get(key.as_str()) { + return Ok(profile.clone()); + } + + Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("config profile `{key}` not found"), + )) + } + None => Ok(ConfigProfile::default()), + } + } +} + +/// Canonicalize the path and convert it to a string to be used as a key in the +/// projects trust map. On Windows, strips UNC, when possible, to try to ensure +/// that different paths that point to the same location have the same key. +fn project_trust_key(project_path: &Path) -> String { + dunce::canonicalize(project_path) + .unwrap_or_else(|_| project_path.to_path_buf()) + .to_string_lossy() + .to_string() +} + +pub fn validate_reserved_model_provider_ids( + model_providers: &HashMap, +) -> Result<(), String> { + let mut conflicts = model_providers + .keys() + .filter(|key| RESERVED_MODEL_PROVIDER_IDS.contains(&key.as_str())) + .map(|key| format!("`{key}`")) + .collect::>(); + conflicts.sort_unstable(); + if conflicts.is_empty() { + Ok(()) + } else { + Err(format!( + "model_providers contains reserved built-in provider IDs: {}. \ +Built-in providers cannot be overridden. 
Rename your custom provider (for example, `openai-custom`).", + conflicts.join(", ") + )) + } +} + +pub fn validate_model_providers( + model_providers: &HashMap, +) -> Result<(), String> { + validate_reserved_model_provider_ids(model_providers)?; + for (key, provider) in model_providers { + provider + .validate() + .map_err(|message| format!("model_providers.{key}: {message}"))?; + } + Ok(()) +} + +fn deserialize_model_providers<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + let model_providers = HashMap::::deserialize(deserializer)?; + validate_model_providers(&model_providers).map_err(serde::de::Error::custom)?; + Ok(model_providers) +} + +pub fn validate_oss_provider(provider: &str) -> std::io::Result<()> { + match provider { + LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => Ok(()), + LEGACY_OLLAMA_CHAT_PROVIDER_ID => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + OLLAMA_CHAT_PROVIDER_REMOVED_ERROR, + )), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!( + "Invalid OSS provider '{provider}'. 
Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}" + ), + )), + } +} diff --git a/codex-rs/config/src/lib.rs b/codex-rs/config/src/lib.rs index 2196011323..5b8e5e6878 100644 --- a/codex-rs/config/src/lib.rs +++ b/codex-rs/config/src/lib.rs @@ -1,5 +1,6 @@ mod cloud_requirements; mod config_requirements; +pub mod config_toml; mod constraint; mod diagnostics; mod fingerprint; @@ -7,8 +8,11 @@ mod mcp_edit; mod mcp_types; mod merge; mod overrides; +pub mod permissions_toml; +pub mod profile_toml; mod project_root_markers; mod requirements_exec_policy; +pub mod schema; mod skills_config; mod state; pub mod types; diff --git a/codex-rs/config/src/permissions_toml.rs b/codex-rs/config/src/permissions_toml.rs new file mode 100644 index 0000000000..fcc3e006b1 --- /dev/null +++ b/codex-rs/config/src/permissions_toml.rs @@ -0,0 +1,240 @@ +use std::collections::BTreeMap; + +use codex_network_proxy::NetworkDomainPermission as ProxyNetworkDomainPermission; +use codex_network_proxy::NetworkMode; +use codex_network_proxy::NetworkProxyConfig; +use codex_network_proxy::NetworkUnixSocketPermission as ProxyNetworkUnixSocketPermission; +use codex_network_proxy::normalize_host; +use codex_protocol::permissions::FileSystemAccessMode; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +pub struct PermissionsToml { + #[serde(flatten)] + pub entries: BTreeMap, +} + +impl PermissionsToml { + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct PermissionProfileToml { + pub filesystem: Option, + pub network: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +pub struct FilesystemPermissionsToml { + #[serde(flatten)] + pub entries: BTreeMap, +} + +impl 
FilesystemPermissionsToml { + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema)] +#[serde(untagged)] +pub enum FilesystemPermissionToml { + Access(FileSystemAccessMode), + Scoped(BTreeMap), +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +pub struct NetworkDomainPermissionsToml { + #[serde(flatten)] + pub entries: BTreeMap, +} + +impl NetworkDomainPermissionsToml { + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + pub fn allowed_domains(&self) -> Option> { + let allowed_domains: Vec = self + .entries + .iter() + .filter(|(_, permission)| matches!(permission, NetworkDomainPermissionToml::Allow)) + .map(|(pattern, _)| pattern.clone()) + .collect(); + (!allowed_domains.is_empty()).then_some(allowed_domains) + } + + pub fn denied_domains(&self) -> Option> { + let denied_domains: Vec = self + .entries + .iter() + .filter(|(_, permission)| matches!(permission, NetworkDomainPermissionToml::Deny)) + .map(|(pattern, _)| pattern.clone()) + .collect(); + (!denied_domains.is_empty()).then_some(denied_domains) + } +} + +#[derive( + Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, JsonSchema, +)] +#[serde(rename_all = "lowercase")] +pub enum NetworkDomainPermissionToml { + Allow, + Deny, +} + +impl std::fmt::Display for NetworkDomainPermissionToml { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let permission = match self { + Self::Allow => "allow", + Self::Deny => "deny", + }; + f.write_str(permission) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +pub struct NetworkUnixSocketPermissionsToml { + #[serde(flatten)] + pub entries: BTreeMap, +} + +impl NetworkUnixSocketPermissionsToml { + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + pub fn allow_unix_sockets(&self) -> Vec { + self.entries + .iter() + 
.filter(|(_, permission)| matches!(permission, NetworkUnixSocketPermissionToml::Allow)) + .map(|(path, _)| path.clone()) + .collect() + } +} + +#[derive( + Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, JsonSchema, +)] +#[serde(rename_all = "lowercase")] +pub enum NetworkUnixSocketPermissionToml { + Allow, + None, +} + +impl std::fmt::Display for NetworkUnixSocketPermissionToml { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let permission = match self { + Self::Allow => "allow", + Self::None => "none", + }; + f.write_str(permission) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] +#[schemars(deny_unknown_fields)] +pub struct NetworkToml { + pub enabled: Option, + pub proxy_url: Option, + pub enable_socks5: Option, + pub socks_url: Option, + pub enable_socks5_udp: Option, + pub allow_upstream_proxy: Option, + pub dangerously_allow_non_loopback_proxy: Option, + pub dangerously_allow_all_unix_sockets: Option, + #[schemars(with = "Option")] + pub mode: Option, + pub domains: Option, + pub unix_sockets: Option, + pub allow_local_binding: Option, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] +#[serde(rename_all = "lowercase")] +enum NetworkModeSchema { + Limited, + Full, +} + +impl NetworkToml { + pub fn apply_to_network_proxy_config(&self, config: &mut NetworkProxyConfig) { + if let Some(enabled) = self.enabled { + config.network.enabled = enabled; + } + if let Some(proxy_url) = self.proxy_url.as_ref() { + config.network.proxy_url = proxy_url.clone(); + } + if let Some(enable_socks5) = self.enable_socks5 { + config.network.enable_socks5 = enable_socks5; + } + if let Some(socks_url) = self.socks_url.as_ref() { + config.network.socks_url = socks_url.clone(); + } + if let Some(enable_socks5_udp) = self.enable_socks5_udp { + config.network.enable_socks5_udp = enable_socks5_udp; + } + if let Some(allow_upstream_proxy) = 
self.allow_upstream_proxy { + config.network.allow_upstream_proxy = allow_upstream_proxy; + } + if let Some(dangerously_allow_non_loopback_proxy) = + self.dangerously_allow_non_loopback_proxy + { + config.network.dangerously_allow_non_loopback_proxy = + dangerously_allow_non_loopback_proxy; + } + if let Some(dangerously_allow_all_unix_sockets) = self.dangerously_allow_all_unix_sockets { + config.network.dangerously_allow_all_unix_sockets = dangerously_allow_all_unix_sockets; + } + if let Some(mode) = self.mode { + config.network.mode = mode; + } + if let Some(domains) = self.domains.as_ref() { + overlay_network_domain_permissions(config, domains); + } + if let Some(unix_sockets) = self.unix_sockets.as_ref() { + let mut proxy_unix_sockets = config.network.unix_sockets.take().unwrap_or_default(); + for (path, permission) in &unix_sockets.entries { + let permission = match permission { + NetworkUnixSocketPermissionToml::Allow => { + ProxyNetworkUnixSocketPermission::Allow + } + NetworkUnixSocketPermissionToml::None => ProxyNetworkUnixSocketPermission::None, + }; + proxy_unix_sockets.entries.insert(path.clone(), permission); + } + config.network.unix_sockets = + (!proxy_unix_sockets.entries.is_empty()).then_some(proxy_unix_sockets); + } + if let Some(allow_local_binding) = self.allow_local_binding { + config.network.allow_local_binding = allow_local_binding; + } + } + + pub fn to_network_proxy_config(&self) -> NetworkProxyConfig { + let mut config = NetworkProxyConfig::default(); + self.apply_to_network_proxy_config(&mut config); + config + } +} + +pub fn overlay_network_domain_permissions( + config: &mut NetworkProxyConfig, + domains: &NetworkDomainPermissionsToml, +) { + for (pattern, permission) in &domains.entries { + let permission = match permission { + NetworkDomainPermissionToml::Allow => ProxyNetworkDomainPermission::Allow, + NetworkDomainPermissionToml::Deny => ProxyNetworkDomainPermission::Deny, + }; + config + .network + 
.upsert_domain_permission(pattern.clone(), permission, normalize_host); + } +} diff --git a/codex-rs/core/src/config/profile.rs b/codex-rs/config/src/profile_toml.rs similarity index 92% rename from codex-rs/core/src/config/profile.rs rename to codex-rs/config/src/profile_toml.rs index 7a83428b89..69215c0440 100644 --- a/codex-rs/core/src/config/profile.rs +++ b/codex-rs/config/src/profile_toml.rs @@ -3,10 +3,11 @@ use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; -use crate::config::ToolsToml; -use codex_config::types::ApprovalsReviewer; -use codex_config::types::Personality; -use codex_config::types::WindowsToml; +use crate::config_toml::ToolsToml; +use crate::types::AnalyticsConfigToml; +use crate::types::ApprovalsReviewer; +use crate::types::Personality; +use crate::types::WindowsToml; use codex_features::FeaturesToml; use codex_protocol::config_types::ReasoningSummary; use codex_protocol::config_types::SandboxMode; @@ -57,13 +58,13 @@ pub struct ConfigProfile { pub tools_view_image: Option, pub tools: Option, pub web_search: Option, - pub analytics: Option, + pub analytics: Option, #[serde(default)] pub windows: Option, /// Optional feature toggles scoped to this profile. #[serde(default)] // Injects known feature keys into the schema and forbids unknown keys. 
- #[schemars(schema_with = "crate::config::schema::features_schema")] + #[schemars(schema_with = "crate::schema::features_schema")] pub features: Option, pub oss_provider: Option, } diff --git a/codex-rs/config/src/requirements_exec_policy.rs b/codex-rs/config/src/requirements_exec_policy.rs index 64d60f8814..30a1a0e694 100644 --- a/codex-rs/config/src/requirements_exec_policy.rs +++ b/codex-rs/config/src/requirements_exec_policy.rs @@ -1,9 +1,9 @@ use codex_execpolicy::Decision; +use codex_execpolicy::PatternToken; use codex_execpolicy::Policy; -use codex_execpolicy::rule::PatternToken; -use codex_execpolicy::rule::PrefixPattern; -use codex_execpolicy::rule::PrefixRule; -use codex_execpolicy::rule::RuleRef; +use codex_execpolicy::PrefixPattern; +use codex_execpolicy::PrefixRule; +use codex_execpolicy::RuleRef; use multimap::MultiMap; use serde::Deserialize; use std::sync::Arc; diff --git a/codex-rs/config/src/schema.rs b/codex-rs/config/src/schema.rs new file mode 100644 index 0000000000..72252d7a21 --- /dev/null +++ b/codex-rs/config/src/schema.rs @@ -0,0 +1,100 @@ +use crate::config_toml::ConfigToml; +use crate::types::RawMcpServerConfig; +use codex_features::FEATURES; +use codex_features::legacy_feature_keys; +use schemars::r#gen::SchemaGenerator; +use schemars::r#gen::SchemaSettings; +use schemars::schema::InstanceType; +use schemars::schema::ObjectValidation; +use schemars::schema::RootSchema; +use schemars::schema::Schema; +use schemars::schema::SchemaObject; +use serde_json::Map; +use serde_json::Value; +use std::path::Path; + +/// Schema for the `[features]` map with known + legacy keys only. 
+pub fn features_schema(schema_gen: &mut SchemaGenerator) -> Schema { + let mut object = SchemaObject { + instance_type: Some(InstanceType::Object.into()), + ..Default::default() + }; + + let mut validation = ObjectValidation::default(); + for feature in FEATURES { + if feature.id == codex_features::Feature::Artifact { + continue; + } + validation + .properties + .insert(feature.key.to_string(), schema_gen.subschema_for::()); + } + for legacy_key in legacy_feature_keys() { + validation + .properties + .insert(legacy_key.to_string(), schema_gen.subschema_for::()); + } + validation.additional_properties = Some(Box::new(Schema::Bool(false))); + object.object = Some(Box::new(validation)); + + Schema::Object(object) +} + +/// Schema for the `[mcp_servers]` map using the raw input shape. +pub fn mcp_servers_schema(schema_gen: &mut SchemaGenerator) -> Schema { + let mut object = SchemaObject { + instance_type: Some(InstanceType::Object.into()), + ..Default::default() + }; + + let validation = ObjectValidation { + additional_properties: Some(Box::new(schema_gen.subschema_for::())), + ..Default::default() + }; + object.object = Some(Box::new(validation)); + + Schema::Object(object) +} + +/// Build the config schema for `config.toml`. +pub fn config_schema() -> RootSchema { + SchemaSettings::draft07() + .with(|settings| { + settings.option_add_null_type = false; + }) + .into_generator() + .into_root_schema_for::() +} + +/// Canonicalize a JSON value by sorting its keys. 
+pub fn canonicalize(value: &Value) -> Value { + match value { + Value::Array(items) => Value::Array(items.iter().map(canonicalize).collect()), + Value::Object(map) => { + let mut entries: Vec<_> = map.iter().collect(); + entries.sort_by(|(left, _), (right, _)| left.cmp(right)); + let mut sorted = Map::with_capacity(map.len()); + for (key, child) in entries { + sorted.insert(key.clone(), canonicalize(child)); + } + Value::Object(sorted) + } + _ => value.clone(), + } +} + +/// Render the config schema as pretty-printed JSON. +pub fn config_schema_json() -> anyhow::Result> { + let schema = config_schema(); + let value = serde_json::to_value(schema)?; + let value = canonicalize(&value); + let json = serde_json::to_vec_pretty(&value)?; + Ok(json) +} + +/// Write the config schema fixture to disk. +pub fn write_config_schema(out_path: &Path) -> anyhow::Result<()> { + let json = config_schema_json()?; + std::fs::write(out_path, json)?; + Ok(()) +} diff --git a/codex-rs/config/src/types.rs b/codex-rs/config/src/types.rs index c523833525..f26d336018 100644 --- a/codex-rs/config/src/types.rs +++ b/codex-rs/config/src/types.rs @@ -36,6 +36,36 @@ const fn default_enabled() -> bool { true } +/// Determine where Codex should store CLI auth credentials. +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "lowercase")] +pub enum AuthCredentialsStoreMode { + #[default] + /// Persist credentials in CODEX_HOME/auth.json. + File, + /// Persist credentials in the keyring. Fail if unavailable. + Keyring, + /// Use keyring when available; otherwise, fall back to a file in CODEX_HOME. + Auto, + /// Store credentials in memory only for the current process. + Ephemeral, +} + +/// Determine where Codex should store and read MCP credentials. 
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "lowercase")] +pub enum OAuthCredentialsStoreMode { + /// `Keyring` when available; otherwise, `File`. + /// Credentials stored in the keyring will only be readable by Codex unless the user explicitly grants access via OS-level keyring access. + #[default] + Auto, + /// CODEX_HOME/.credentials.json + /// This file will be readable to Codex and other applications running as the same user. + File, + /// Keyring when available, otherwise fail. + Keyring, +} + #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema)] #[serde(rename_all = "kebab-case")] pub enum WindowsSandboxModeToml { diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 0a9dfb521e..d2cbad83ef 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -93,7 +93,6 @@ rmcp = { workspace = true, default-features = false, features = [ "schemars", "server", ] } -schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha1 = { workspace = true } diff --git a/codex-rs/core/README.md b/codex-rs/core/README.md index 63cd3b6f36..d8ea2ac984 100644 --- a/codex-rs/core/README.md +++ b/codex-rs/core/README.md @@ -41,7 +41,8 @@ too old to support `--argv0`, the helper keeps using system bubblewrap and switches to a no-`--argv0` compatibility path for the inner re-exec. If `bwrap` is missing, it falls back to the vendored bubblewrap path compiled into the binary and Codex surfaces a startup warning through its normal notification -path instead of printing directly from the sandbox helper. +path instead of printing directly from the sandbox helper. Codex also surfaces +a startup warning when bubblewrap cannot create user namespaces. 
### Windows diff --git a/codex-rs/core/config.schema.json b/codex-rs/core/config.schema.json index 7f5be535ea..e4e7ff85b7 100644 --- a/codex-rs/core/config.schema.json +++ b/codex-rs/core/config.schema.json @@ -362,6 +362,9 @@ "connectors": { "type": "boolean" }, + "debug_hide_spawn_agent_metadata": { + "type": "boolean" + }, "default_mode_request_user_input": { "type": "boolean" }, @@ -437,6 +440,9 @@ "realtime_conversation": { "type": "boolean" }, + "remote_control": { + "type": "boolean" + }, "remote_models": { "type": "boolean" }, @@ -2069,6 +2075,9 @@ "connectors": { "type": "boolean" }, + "debug_hide_spawn_agent_metadata": { + "type": "boolean" + }, "default_mode_request_user_input": { "type": "boolean" }, @@ -2144,6 +2153,9 @@ "realtime_conversation": { "type": "boolean" }, + "remote_control": { + "type": "boolean" + }, "remote_models": { "type": "boolean" }, @@ -2417,7 +2429,7 @@ "$ref": "#/definitions/Notice" } ], - "description": "Collection of in-product notices (different from notifications) See [`codex_config::types::Notice`] for more details" + "description": "Collection of in-product notices (different from notifications) See [`crate::types::Notice`] for more details" }, "notify": { "default": null, diff --git a/codex-rs/core/src/agent/control.rs b/codex-rs/core/src/agent/control.rs index 255c6bc2c3..529615a7a5 100644 --- a/codex-rs/core/src/agent/control.rs +++ b/codex-rs/core/src/agent/control.rs @@ -730,6 +730,15 @@ impl AgentControl { self.state.agent_metadata_for_thread(agent_id) } + pub(crate) async fn list_live_agent_subtree_thread_ids( + &self, + agent_id: ThreadId, + ) -> CodexResult> { + let mut thread_ids = vec![agent_id]; + thread_ids.extend(self.live_thread_spawn_descendants(agent_id).await?); + Ok(thread_ids) + } + pub(crate) async fn get_agent_config_snapshot( &self, agent_id: ThreadId, diff --git a/codex-rs/core/src/agent/control_tests.rs b/codex-rs/core/src/agent/control_tests.rs index d254a49343..54693228fa 100644 --- 
a/codex-rs/core/src/agent/control_tests.rs +++ b/codex-rs/core/src/agent/control_tests.rs @@ -256,6 +256,7 @@ async fn get_status_returns_not_found_without_manager() { async fn on_event_updates_status_from_task_started() { let status = agent_status_from_event(&EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, })); @@ -267,6 +268,8 @@ async fn on_event_updates_status_from_task_complete() { let status = agent_status_from_event(&EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("done".to_string()), + completed_at: None, + duration_ms: None, })); let expected = AgentStatus::Completed(Some("done".to_string())); assert_eq!(status, Some(expected)); @@ -288,6 +291,8 @@ async fn on_event_updates_status_from_turn_aborted() { let status = agent_status_from_event(&EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, })); let expected = AgentStatus::Interrupted; @@ -1200,6 +1205,8 @@ async fn multi_agent_v2_completion_ignores_dead_direct_parent() { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: tester_turn.sub_id.clone(), last_agent_message: Some("done".to_string()), + completed_at: None, + duration_ms: None, }), ) .await; @@ -1284,6 +1291,8 @@ async fn multi_agent_v2_completion_queues_message_for_direct_parent() { EventMsg::TurnComplete(TurnCompleteEvent { turn_id: tester_turn.sub_id.clone(), last_agent_message: Some("done".to_string()), + completed_at: None, + duration_ms: None, }), ) .await; @@ -1678,6 +1687,132 @@ async fn resume_agent_from_rollout_reads_archived_rollout_path() { .expect("resumed child shutdown should succeed"); } +#[tokio::test] +async fn list_agent_subtree_thread_ids_includes_anonymous_and_closed_descendants() { + let harness = AgentControlHarness::new().await; + let (parent_thread_id, 
_parent_thread) = harness.start_thread().await; + let worker_path = AgentPath::root().join("worker").expect("worker path"); + let reviewer_path = AgentPath::root().join("reviewer").expect("reviewer path"); + + let worker_thread_id = harness + .control + .spawn_agent( + harness.config.clone(), + text_input("hello worker"), + Some(SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id, + depth: 1, + agent_path: Some(worker_path.clone()), + agent_nickname: None, + agent_role: Some("worker".to_string()), + })), + ) + .await + .expect("worker spawn should succeed"); + let worker_child_thread_id = harness + .control + .spawn_agent( + harness.config.clone(), + text_input("hello worker child"), + Some(SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id: worker_thread_id, + depth: 2, + agent_path: Some( + worker_path + .join("child") + .expect("worker child path should be valid"), + ), + agent_nickname: None, + agent_role: Some("worker".to_string()), + })), + ) + .await + .expect("worker child spawn should succeed"); + let no_path_child_thread_id = harness + .control + .spawn_agent( + harness.config.clone(), + text_input("hello anonymous child"), + Some(SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id: worker_thread_id, + depth: 2, + agent_path: None, + agent_nickname: None, + agent_role: Some("worker".to_string()), + })), + ) + .await + .expect("no-path child spawn should succeed"); + let no_path_grandchild_thread_id = harness + .control + .spawn_agent( + harness.config.clone(), + text_input("hello anonymous grandchild"), + Some(SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id: no_path_child_thread_id, + depth: 3, + agent_path: None, + agent_nickname: None, + agent_role: Some("worker".to_string()), + })), + ) + .await + .expect("no-path grandchild spawn should succeed"); + let _reviewer_thread_id = harness + .control + .spawn_agent( + harness.config.clone(), + text_input("hello 
reviewer"), + Some(SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id, + depth: 1, + agent_path: Some(reviewer_path), + agent_nickname: None, + agent_role: Some("reviewer".to_string()), + })), + ) + .await + .expect("reviewer spawn should succeed"); + + let _ = harness + .control + .shutdown_live_agent(no_path_grandchild_thread_id) + .await + .expect("no-path grandchild shutdown should succeed"); + + let mut worker_subtree_thread_ids = harness + .manager + .list_agent_subtree_thread_ids(worker_thread_id) + .await + .expect("worker subtree thread ids should load"); + worker_subtree_thread_ids.sort_by_key(ToString::to_string); + let mut expected_worker_subtree_thread_ids = vec![ + worker_thread_id, + worker_child_thread_id, + no_path_child_thread_id, + no_path_grandchild_thread_id, + ]; + expected_worker_subtree_thread_ids.sort_by_key(ToString::to_string); + assert_eq!( + worker_subtree_thread_ids, + expected_worker_subtree_thread_ids + ); + + let mut no_path_child_subtree_thread_ids = harness + .manager + .list_agent_subtree_thread_ids(no_path_child_thread_id) + .await + .expect("no-path subtree thread ids should load"); + no_path_child_subtree_thread_ids.sort_by_key(ToString::to_string); + let mut expected_no_path_child_subtree_thread_ids = + vec![no_path_child_thread_id, no_path_grandchild_thread_id]; + expected_no_path_child_subtree_thread_ids.sort_by_key(ToString::to_string); + assert_eq!( + no_path_child_subtree_thread_ids, + expected_no_path_child_subtree_thread_ids + ); +} + #[tokio::test] async fn shutdown_agent_tree_closes_live_descendants() { let harness = AgentControlHarness::new().await; diff --git a/codex-rs/core/src/agent/registry.rs b/codex-rs/core/src/agent/registry.rs index c600d5c64b..1acd73085f 100644 --- a/codex-rs/core/src/agent/registry.rs +++ b/codex-rs/core/src/agent/registry.rs @@ -220,7 +220,7 @@ impl AgentRegistry { } else { active_agents.used_agent_nicknames.clear(); active_agents.nickname_reset_count += 1; - if let 
Some(metrics) = codex_otel::metrics::global() { + if let Some(metrics) = codex_otel::global() { let _ = metrics.counter( "codex.multi_agent.nickname_pool_reset", /*inc*/ 1, diff --git a/codex-rs/core/src/agent/role.rs b/codex-rs/core/src/agent/role.rs index b7d7b55ab1..83c2b1843b 100644 --- a/codex-rs/core/src/agent/role.rs +++ b/codex-rs/core/src/agent/role.rs @@ -17,6 +17,7 @@ use crate::config_loader::ConfigLayerStackOrdering; use crate::config_loader::resolve_relative_paths_in_config_toml; use anyhow::anyhow; use codex_app_server_protocol::ConfigLayerSource; +use codex_config::config_toml::ConfigToml; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::path::Path; @@ -63,6 +64,12 @@ async fn apply_role_to_config_inner( return Ok(()); }; let role_layer_toml = load_role_layer_toml(config, config_file, is_built_in, role_name).await?; + if role_layer_toml + .as_table() + .is_some_and(toml::map::Map::is_empty) + { + return Ok(()); + } let (preserve_current_profile, preserve_current_provider) = preservation_policy(config, &role_layer_toml); @@ -221,7 +228,7 @@ mod reload { fn deserialize_effective_config( config: &Config, config_layer_stack: &ConfigLayerStack, - ) -> anyhow::Result { + ) -> anyhow::Result { Ok(deserialize_config_toml_with_base( config_layer_stack.effective_config(), &config.codex_home, diff --git a/codex-rs/core/src/agent/role_tests.rs b/codex-rs/core/src/agent/role_tests.rs index 5b3941ebda..d68376ddd6 100644 --- a/codex-rs/core/src/agent/role_tests.rs +++ b/codex-rs/core/src/agent/role_tests.rs @@ -87,6 +87,22 @@ async fn apply_explorer_role_sets_model_and_adds_session_flags_layer() { assert_eq!(session_flags_layer_count(&config), before_layers + 1); } +#[tokio::test] +async fn apply_empty_explorer_role_preserves_current_model_and_reasoning_effort() { + let (_home, mut config) = test_config_with_cli_overrides(Vec::new()).await; + let before_layers = session_flags_layer_count(&config); + config.model = 
Some("gpt-5.4-mini".to_string()); + config.model_reasoning_effort = Some(ReasoningEffort::High); + + apply_role_to_config(&mut config, Some("explorer")) + .await + .expect("explorer role should apply"); + + assert_eq!(config.model.as_deref(), Some("gpt-5.4-mini")); + assert_eq!(config.model_reasoning_effort, Some(ReasoningEffort::High)); + assert_eq!(session_flags_layer_count(&config), before_layers); +} + #[tokio::test] async fn apply_role_returns_unavailable_for_missing_user_role_file() { let (_home, mut config) = test_config_with_cli_overrides(Vec::new()).await; diff --git a/codex-rs/core/src/apply_patch_tests.rs b/codex-rs/core/src/apply_patch_tests.rs index 1b9e722d5a..c0190c3708 100644 --- a/codex-rs/core/src/apply_patch_tests.rs +++ b/codex-rs/core/src/apply_patch_tests.rs @@ -1,4 +1,5 @@ use super::*; +use core_test_support::PathBufExt; use pretty_assertions::assert_eq; use tempfile::tempdir; @@ -6,14 +7,14 @@ use tempfile::tempdir; #[test] fn convert_apply_patch_maps_add_variant() { let tmp = tempdir().expect("tmp"); - let p = tmp.path().join("a.txt"); + let p = tmp.path().join("a.txt").abs(); // Create an action with a single Add change let action = ApplyPatchAction::new_add_for_test(&p, "hello".to_string()); let got = convert_apply_patch_to_protocol(&action); assert_eq!( - got.get(&p), + got.get(p.as_path()), Some(&FileChange::Add { content: "hello".to_string() }) diff --git a/codex-rs/core/src/apps/render.rs b/codex-rs/core/src/apps/render.rs index 9b93913d78..fe23a09f8d 100644 --- a/codex-rs/core/src/apps/render.rs +++ b/codex-rs/core/src/apps/render.rs @@ -1,5 +1,5 @@ use codex_app_server_protocol::AppInfo; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_protocol::protocol::APPS_INSTRUCTIONS_CLOSE_TAG; use codex_protocol::protocol::APPS_INSTRUCTIONS_OPEN_TAG; diff --git a/codex-rs/core/src/bin/config_schema.rs b/codex-rs/core/src/bin/config_schema.rs index 8d33df42e1..f92ce62307 100644 --- 
a/codex-rs/core/src/bin/config_schema.rs +++ b/codex-rs/core/src/bin/config_schema.rs @@ -15,6 +15,6 @@ fn main() -> Result<()> { let out_path = args .out .unwrap_or_else(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("config.schema.json")); - codex_core::config::schema::write_config_schema(&out_path)?; + codex_config::schema::write_config_schema(&out_path)?; Ok(()) } diff --git a/codex-rs/core/src/client.rs b/codex-rs/core/src/client.rs index d77e36870a..80ad24be5b 100644 --- a/codex-rs/core/src/client.rs +++ b/codex-rs/core/src/client.rs @@ -31,12 +31,15 @@ use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; +use codex_api::ApiError; use codex_api::CompactClient as ApiCompactClient; use codex_api::CompactionInput as ApiCompactionInput; +use codex_api::Compression; use codex_api::MemoriesClient as ApiMemoriesClient; use codex_api::MemorySummarizeInput as ApiMemorySummarizeInput; use codex_api::MemorySummarizeOutput as ApiMemorySummarizeOutput; use codex_api::RawMemory as ApiRawMemory; +use codex_api::Reasoning; use codex_api::RequestTelemetry; use codex_api::ReqwestTransport; use codex_api::ResponseCreateWsRequest; @@ -45,15 +48,12 @@ use codex_api::ResponsesClient as ApiResponsesClient; use codex_api::ResponsesOptions as ApiResponsesOptions; use codex_api::ResponsesWebsocketClient as ApiWebSocketResponsesClient; use codex_api::ResponsesWebsocketConnection as ApiWebSocketConnection; +use codex_api::ResponsesWsRequest; use codex_api::SseTelemetry; use codex_api::TransportError; use codex_api::WebsocketTelemetry; use codex_api::build_conversation_headers; -use codex_api::common::Reasoning; -use codex_api::common::ResponsesWsRequest; use codex_api::create_text_param_for_request; -use codex_api::error::ApiError; -use codex_api::requests::responses::Compression; use codex_api::response_create_client_metadata; use codex_app_server_protocol::AuthMode; use codex_login::AuthManager; @@ -98,8 +98,8 @@ use 
crate::client_common::ResponseEvent; use crate::client_common::ResponseStream; use crate::flags::CODEX_RS_SSE_FIXTURE; use crate::util::emit_feedback_auth_recovery_tags; -use codex_api::api_bridge::CoreAuthProvider; -use codex_api::api_bridge::map_api_error; +use codex_api::CoreAuthProvider; +use codex_api::map_api_error; use codex_feedback::FeedbackRequestTags; use codex_feedback::emit_feedback_request_tags_with_auth_env; use codex_login::api_bridge::auth_provider_from_auth; @@ -118,6 +118,7 @@ use codex_response_debug_context::telemetry_api_error_message; use codex_response_debug_context::telemetry_transport_error_message; pub const OPENAI_BETA_HEADER: &str = "OpenAI-Beta"; +pub const X_CODEX_INSTALLATION_ID_HEADER: &str = "x-codex-installation-id"; pub const X_CODEX_TURN_STATE_HEADER: &str = "x-codex-turn-state"; pub const X_CODEX_TURN_METADATA_HEADER: &str = "x-codex-turn-metadata"; pub const X_CODEX_PARENT_THREAD_ID_HEADER: &str = "x-codex-parent-thread-id"; @@ -142,6 +143,7 @@ struct ModelClientState { auth_manager: Option>, conversation_id: ThreadId, window_generation: AtomicU64, + installation_id: String, provider: ModelProviderInfo, auth_env_telemetry: AuthEnvTelemetry, session_source: SessionSource, @@ -263,6 +265,7 @@ impl ModelClient { pub fn new( auth_manager: Option>, conversation_id: ThreadId, + installation_id: String, provider: ModelProviderInfo, session_source: SessionSource, model_verbosity: Option, @@ -280,6 +283,7 @@ impl ModelClient { auth_manager, conversation_id, window_generation: AtomicU64::new(0), + installation_id, provider, auth_env_telemetry, session_source, @@ -399,7 +403,11 @@ impl ModelClient { ApiCompactClient::new(transport, client_setup.api_provider, client_setup.api_auth) .with_telemetry(Some(request_telemetry)); - let instructions = prompt.base_instructions.text.clone(); + let instructions = prompt + .base_instructions + .as_ref() + .map(|base_instructions| base_instructions.text.clone()) + .unwrap_or_default(); let input = 
prompt.get_formatted_input(); let tools = create_tools_json_for_responses_api(&prompt.tools)?; let reasoning = Self::build_reasoning(model_info, effort, summary); @@ -425,7 +433,11 @@ impl ModelClient { text, }; - let mut extra_headers = self.build_responses_identity_headers(); + let mut extra_headers = ApiHeaderMap::new(); + if let Ok(header_value) = HeaderValue::from_str(&self.state.installation_id) { + extra_headers.insert(X_CODEX_INSTALLATION_ID_HEADER, header_value); + } + extra_headers.extend(self.build_responses_identity_headers()); extra_headers.extend(build_conversation_headers(Some( self.state.conversation_id.to_string(), ))); @@ -511,6 +523,10 @@ impl ModelClient { turn_metadata_header: Option<&str>, ) -> HashMap { let mut client_metadata = HashMap::new(); + client_metadata.insert( + X_CODEX_INSTALLATION_ID_HEADER.to_string(), + self.state.installation_id.clone(), + ); client_metadata.insert( X_CODEX_WINDOW_ID_HEADER.to_string(), self.current_window_id(), @@ -755,7 +771,10 @@ impl ModelClientSession { summary: ReasoningSummaryConfig, service_tier: Option, ) -> Result { - let instructions = &prompt.base_instructions.text; + let instructions = prompt + .base_instructions + .as_ref() + .map(|base_instructions| base_instructions.text.clone()); let input = prompt.get_formatted_input(); let tools = create_tools_json_for_responses_api(&prompt.tools)?; let default_reasoning_effort = model_info.default_reasoning_level; @@ -794,7 +813,7 @@ impl ModelClientSession { let prompt_cache_key = Some(self.client.state.conversation_id.to_string()); let request = ResponsesApiRequest { model: model_info.slug.clone(), - instructions: instructions.clone(), + instructions, input, tools, tool_choice: "auto".to_string(), @@ -810,6 +829,10 @@ impl ModelClientSession { }, prompt_cache_key, text, + client_metadata: Some(HashMap::from([( + X_CODEX_INSTALLATION_ID_HEADER.to_string(), + self.client.state.installation_id.clone(), + )])), }; Ok(request) } diff --git 
a/codex-rs/core/src/client_common.rs b/codex-rs/core/src/client_common.rs index cb50ccac39..a90d75bf01 100644 --- a/codex-rs/core/src/client_common.rs +++ b/codex-rs/core/src/client_common.rs @@ -1,4 +1,4 @@ -pub use codex_api::common::ResponseEvent; +pub use codex_api::ResponseEvent; use codex_config::types::Personality; use codex_protocol::error::Result; use codex_protocol::models::BaseInstructions; @@ -23,7 +23,7 @@ pub const REVIEW_EXIT_INTERRUPTED_TMPL: &str = include_str!("../templates/review/exit_interrupted.xml"); /// API request payload for a single model turn -#[derive(Default, Debug, Clone)] +#[derive(Debug, Clone)] pub struct Prompt { /// Conversation context input items. pub input: Vec, @@ -35,7 +35,7 @@ pub struct Prompt { /// Whether parallel tool calls are permitted for this prompt. pub(crate) parallel_tool_calls: bool, - pub base_instructions: BaseInstructions, + pub base_instructions: Option, /// Optionally specify the personality of the model. pub personality: Option, @@ -44,6 +44,19 @@ pub struct Prompt { pub output_schema: Option, } +impl Default for Prompt { + fn default() -> Self { + Self { + input: Vec::new(), + tools: Vec::new(), + parallel_tool_calls: false, + base_instructions: Some(BaseInstructions::default()), + personality: None, + output_schema: None, + } + } +} + impl Prompt { pub(crate) fn get_formatted_input(&self) -> Vec { let mut input = self.input.clone(); diff --git a/codex-rs/core/src/client_common_tests.rs b/codex-rs/core/src/client_common_tests.rs index 16a37796ad..4d4b5af4ff 100644 --- a/codex-rs/core/src/client_common_tests.rs +++ b/codex-rs/core/src/client_common_tests.rs @@ -1,6 +1,6 @@ +use codex_api::OpenAiVerbosity; use codex_api::ResponsesApiRequest; -use codex_api::common::OpenAiVerbosity; -use codex_api::common::TextControls; +use codex_api::TextControls; use codex_api::create_text_param_for_request; use codex_protocol::config_types::ServiceTier; use codex_protocol::models::FunctionCallOutputPayload; @@ -14,7 +14,7 
@@ fn serializes_text_verbosity_when_set() { let tools: Vec = vec![]; let req = ResponsesApiRequest { model: "gpt-5.1".to_string(), - instructions: "i".to_string(), + instructions: Some("i".to_string()), input, tools, tool_choice: "auto".to_string(), @@ -29,6 +29,7 @@ fn serializes_text_verbosity_when_set() { verbosity: Some(OpenAiVerbosity::Low), format: None, }), + client_metadata: None, }; let v = serde_json::to_value(&req).expect("json"); @@ -57,7 +58,7 @@ fn serializes_text_schema_with_strict_format() { let req = ResponsesApiRequest { model: "gpt-5.1".to_string(), - instructions: "i".to_string(), + instructions: Some("i".to_string()), input, tools, tool_choice: "auto".to_string(), @@ -69,6 +70,7 @@ fn serializes_text_schema_with_strict_format() { prompt_cache_key: None, service_tier: None, text: Some(text_controls), + client_metadata: None, }; let v = serde_json::to_value(&req).expect("json"); @@ -94,7 +96,7 @@ fn omits_text_when_not_set() { let tools: Vec = vec![]; let req = ResponsesApiRequest { model: "gpt-5.1".to_string(), - instructions: "i".to_string(), + instructions: Some("i".to_string()), input, tools, tool_choice: "auto".to_string(), @@ -106,6 +108,7 @@ fn omits_text_when_not_set() { prompt_cache_key: None, service_tier: None, text: None, + client_metadata: None, }; let v = serde_json::to_value(&req).expect("json"); @@ -116,7 +119,7 @@ fn omits_text_when_not_set() { fn serializes_flex_service_tier_when_set() { let req = ResponsesApiRequest { model: "gpt-5.1".to_string(), - instructions: "i".to_string(), + instructions: Some("i".to_string()), input: vec![], tools: vec![], tool_choice: "auto".to_string(), @@ -128,6 +131,7 @@ fn serializes_flex_service_tier_when_set() { prompt_cache_key: None, service_tier: Some(ServiceTier::Flex.to_string()), text: None, + client_metadata: None, }; let v = serde_json::to_value(&req).expect("json"); diff --git a/codex-rs/core/src/client_tests.rs b/codex-rs/core/src/client_tests.rs index a4fb681201..2fd5f04f95 100644 --- 
a/codex-rs/core/src/client_tests.rs +++ b/codex-rs/core/src/client_tests.rs @@ -2,11 +2,12 @@ use super::AuthRequestTelemetryContext; use super::ModelClient; use super::PendingUnauthorizedRetry; use super::UnauthorizedRecoveryExecution; +use super::X_CODEX_INSTALLATION_ID_HEADER; use super::X_CODEX_PARENT_THREAD_ID_HEADER; use super::X_CODEX_TURN_METADATA_HEADER; use super::X_CODEX_WINDOW_ID_HEADER; use super::X_OPENAI_SUBAGENT_HEADER; -use codex_api::api_bridge::CoreAuthProvider; +use codex_api::CoreAuthProvider; use codex_app_server_protocol::AuthMode; use codex_model_provider_info::WireApi; use codex_model_provider_info::create_oss_provider_with_base_url; @@ -23,6 +24,7 @@ fn test_model_client(session_source: SessionSource) -> ModelClient { ModelClient::new( /*auth_manager*/ None, ThreadId::new(), + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), provider, session_source, /*model_verbosity*/ None, @@ -107,6 +109,10 @@ fn build_ws_client_metadata_includes_window_lineage_and_turn_metadata() { assert_eq!( client_metadata, std::collections::HashMap::from([ + ( + X_CODEX_INSTALLATION_ID_HEADER.to_string(), + "11111111-1111-4111-8111-111111111111".to_string(), + ), ( X_CODEX_WINDOW_ID_HEADER.to_string(), format!("{conversation_id}:1"), diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 758b592047..dce76bf3d2 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -24,6 +24,7 @@ use crate::compact_remote::run_inline_remote_auto_compact_task; use crate::config::ManagedFeatures; use crate::connectors; use crate::exec_policy::ExecPolicyManager; +use crate::installation_id::resolve_installation_id; use crate::parse_turn_item; use crate::path_utils::normalize_for_native_workdir; use crate::realtime_conversation::RealtimeConversationManager; @@ -54,6 +55,7 @@ use codex_analytics::SubAgentThreadStartedInput; use codex_analytics::build_track_events_context; use 
codex_app_server_protocol::McpServerElicitationRequest; use codex_app_server_protocol::McpServerElicitationRequestParams; +use codex_config::types::OAuthCredentialsStoreMode; use codex_exec_server::Environment; use codex_exec_server::EnvironmentManager; use codex_features::FEATURES; @@ -69,11 +71,11 @@ use codex_login::AuthManager; use codex_login::CodexAuth; use codex_login::auth_env_telemetry::collect_auth_env_telemetry; use codex_login::default_client::originator; -use codex_mcp::mcp_connection_manager::McpConnectionManager; -use codex_mcp::mcp_connection_manager::SandboxState; -use codex_mcp::mcp_connection_manager::ToolInfo as McpToolInfo; -use codex_mcp::mcp_connection_manager::codex_apps_tools_cache_key; -use codex_mcp::mcp_connection_manager::filter_non_codex_apps_mcp_tools_only; +use codex_mcp::McpConnectionManager; +use codex_mcp::SandboxState; +use codex_mcp::ToolInfo as McpToolInfo; +use codex_mcp::codex_apps_tools_cache_key; +use codex_mcp::filter_non_codex_apps_mcp_tools_only; #[cfg(test)] use codex_models_manager::collaboration_mode_presets::CollaborationModesConfig; use codex_models_manager::manager::ModelsManager; @@ -128,7 +130,6 @@ use codex_protocol::request_permissions::RequestPermissionsResponse; use codex_protocol::request_user_input::RequestUserInputArgs; use codex_protocol::request_user_input::RequestUserInputResponse; use codex_rmcp_client::ElicitationResponse; -use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rollout::state_db; use codex_shell_command::parse_command::parse_command; use codex_terminal_detection::user_agent; @@ -149,7 +150,6 @@ use rmcp::model::PaginatedRequestParams; use rmcp::model::ReadResourceRequestParams; use rmcp::model::ReadResourceResult; use rmcp::model::RequestId; -use serde_json; use serde_json::Value; use tokio::sync::Mutex; use tokio::sync::RwLock; @@ -322,12 +322,12 @@ use crate::util::backoff; use crate::windows_sandbox::WindowsSandboxLevelExt; use codex_async_utils::OrCancelExt; use 
codex_git_utils::get_git_repo_root; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; -use codex_mcp::mcp::auth::compute_auth_statuses; -use codex_mcp::mcp::with_codex_apps_mcp; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::compute_auth_statuses; +use codex_mcp::with_codex_apps_mcp; use codex_otel::SessionTelemetry; +use codex_otel::THREAD_STARTED_METRIC; use codex_otel::TelemetryAuthMode; -use codex_otel::metrics::names::THREAD_STARTED_METRIC; use codex_protocol::config_types::CollaborationMode; use codex_protocol::config_types::Personality; use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; @@ -407,8 +407,6 @@ pub(crate) type SessionLoopTermination = Shared>; pub struct CodexSpawnOk { pub codex: Codex, pub thread_id: ThreadId, - #[deprecated(note = "use thread_id")] - pub conversation_id: ThreadId, } pub(crate) struct CodexSpawnArgs { @@ -536,7 +534,11 @@ impl Codex { config.startup_warnings.push(message); } - let user_instructions = get_user_instructions(&config).await; + let environment = environment_manager + .current() + .await + .map_err(|err| CodexErr::Fatal(format!("failed to create environment: {err}")))?; + let user_instructions = get_user_instructions(&config, environment.as_deref()).await; let exec_policy = if crate::guardian::is_guardian_reviewer_source(&session_source) { // Guardian review should rely on the built-in shell safety checks, @@ -577,11 +579,15 @@ impl Codex { let model_info = models_manager .get_model_info(model.as_str(), &config.to_models_manager_config()) .await; - let base_instructions = config - .base_instructions - .clone() - .or_else(|| conversation_history.get_base_instructions().map(|s| s.text)) - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)); + let base_instructions = match config.base_instructions.clone() { + Some(base_instructions) => base_instructions, + None => conversation_history + .get_base_instructions() + .map(|base_instructions| { + 
base_instructions.map(|base_instructions| base_instructions.text) + }) + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), + }; // Respect thread-start tools. When missing (resumed/forked threads), read from the db // first, then fall back to rollout-file tools. @@ -610,6 +616,14 @@ impl Codex { dynamic_tools }; + let developer_instructions_override = config + .developer_instructions_override + .clone() + .or_else(|| conversation_history.get_developer_instructions()); + let developer_instructions = developer_instructions_override + .clone() + .unwrap_or_else(|| config.developer_instructions.clone()); + // TODO (aibrahim): Consolidate config.model and config.model_reasoning_effort into config.collaboration_mode // to avoid extracting these fields separately and constructing CollaborationMode here. let collaboration_mode = CollaborationMode { @@ -625,7 +639,8 @@ impl Codex { collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, service_tier: config.service_tier, - developer_instructions: config.developer_instructions.clone(), + developer_instructions, + developer_instructions_override, user_instructions, personality: config.personality, base_instructions, @@ -664,12 +679,12 @@ impl Codex { agent_status_tx.clone(), conversation_history, session_source_clone, - environment_manager, skills_manager, plugins_manager, mcp_manager.clone(), skills_watcher, agent_control, + environment, ) .await .map_err(|e| { @@ -693,12 +708,7 @@ impl Codex { session_loop_termination: session_loop_termination_from_handle(session_loop_handle), }; - #[allow(deprecated)] - Ok(CodexSpawnOk { - codex, - thread_id, - conversation_id: thread_id, - }) + Ok(CodexSpawnOk { codex, thread_id }) } /// Submit the `op` wrapped in a `Submission` with a unique ID. 
@@ -862,7 +872,7 @@ pub(crate) struct TurnContext { pub(crate) reasoning_effort: Option, pub(crate) reasoning_summary: ReasoningSummaryConfig, pub(crate) session_source: SessionSource, - pub(crate) environment: Arc, + pub(crate) environment: Option>, /// The session's absolute working directory. All relative paths provided /// by the model as well as sandbox policies are resolved against this path /// instead of `std::env::current_dir()`. @@ -905,8 +915,13 @@ impl TurnContext { } pub(crate) fn apps_enabled(&self) -> bool { - self.features - .apps_enabled_cached(self.auth_manager.as_deref()) + let is_chatgpt_auth = self + .auth_manager + .as_deref() + .and_then(AuthManager::auth_cached) + .as_ref() + .is_some_and(CodexAuth::is_chatgpt_auth); + self.features.apps_enabled_for_auth(is_chatgpt_auth) } pub(crate) async fn with_model(&self, model: String, models_manager: &ModelsManager) -> Self { @@ -958,6 +973,7 @@ impl TurnContext { .with_unified_exec_shell_mode(self.tools_config.unified_exec_shell_mode.clone()) .with_web_search_config(self.tools_config.web_search_config.clone()) .with_allow_login_shell(self.tools_config.allow_login_shell) + .with_has_environment(self.tools_config.has_environment) .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( &config.agent_roles, )); @@ -977,7 +993,7 @@ impl TurnContext { reasoning_effort, reasoning_summary: self.reasoning_summary, session_source: self.session_source.clone(), - environment: Arc::clone(&self.environment), + environment: self.environment.clone(), cwd: self.cwd.clone(), current_date: self.current_date.clone(), timezone: self.timezone.clone(), @@ -1089,6 +1105,10 @@ pub(crate) struct SessionConfiguration { /// Developer instructions that supplement the base instructions. developer_instructions: Option, + /// Explicit developer instructions override, preserving `null` as distinct + /// from a missing override. 
+ developer_instructions_override: Option>, + /// Model instructions that are appended to the base instructions. user_instructions: Option, @@ -1096,7 +1116,7 @@ pub(crate) struct SessionConfiguration { personality: Option, /// Base instructions for the session. - base_instructions: String, + base_instructions: Option, /// Compact prompt override. compact_prompt: Option, @@ -1406,7 +1426,7 @@ impl Session { model_info: ModelInfo, models_manager: &ModelsManager, network: Option, - environment: Arc, + environment: Option>, sub_id: String, js_repl: Arc, skills_outcome: Arc, @@ -1439,6 +1459,7 @@ impl Session { ) .with_web_search_config(per_turn_config.web_search_config.clone()) .with_allow_login_shell(per_turn_config.permissions.allow_login_shell) + .with_has_environment(environment.is_some()) .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( &per_turn_config.agent_roles, )); @@ -1511,12 +1532,12 @@ impl Session { agent_status: watch::Sender, initial_history: InitialHistory, session_source: SessionSource, - environment_manager: Arc, skills_manager: Arc, plugins_manager: Arc, mcp_manager: Arc, skills_watcher: Arc, agent_control: AgentControl, + environment: Option>, ) -> anyhow::Result> { debug!( "Configuring session: model={}; provider={:?}", @@ -1534,9 +1555,13 @@ impl Session { conversation_id, forked_from_id, session_source, - BaseInstructions { - text: session_configuration.base_instructions.clone(), - }, + session_configuration + .base_instructions + .clone() + .map(|text| BaseInstructions { text }), + session_configuration + .developer_instructions_override + .clone(), session_configuration.dynamic_tools.clone(), if session_configuration.persist_extended_history { EventPersistenceMode::Extended @@ -1903,6 +1928,7 @@ impl Session { }); } + let installation_id = resolve_installation_id(&config.codex_home).await?; let services = SessionServices { // Initialize the MCP connection manager with an uninitialized // instance. 
It will be replaced with one created via @@ -1946,6 +1972,7 @@ impl Session { model_client: ModelClient::new( Some(Arc::clone(&auth_manager)), conversation_id, + installation_id, session_configuration.provider.clone(), session_configuration.session_source.clone(), config.model_verbosity, @@ -1956,7 +1983,7 @@ impl Session { code_mode_service: crate::tools::code_mode::CodeModeService::new( config.js_repl_node_path.clone(), ), - environment: environment_manager.current().await?, + environment, }; services .model_client @@ -2098,8 +2125,9 @@ impl Session { )); } } - sess.schedule_startup_prewarm(session_configuration.base_instructions.clone()) - .await; + if let Some(base_instructions) = session_configuration.base_instructions.clone() { + sess.schedule_startup_prewarm(base_instructions).await; + } let session_start_source = match &initial_history { InitialHistory::Resumed(_) => codex_hooks::SessionStartSource::Resume, InitialHistory::New | InitialHistory::Forked(_) => { @@ -2201,11 +2229,13 @@ impl Session { state.history.estimate_token_count(turn_context) } - pub(crate) async fn get_base_instructions(&self) -> BaseInstructions { + pub(crate) async fn get_base_instructions(&self) -> Option { let state = self.state.lock().await; - BaseInstructions { - text: state.session_configuration.base_instructions.clone(), - } + state + .session_configuration + .base_instructions + .clone() + .map(|text| BaseInstructions { text }) } // Merges connector IDs into the session-level explicit connector selection. 
@@ -2542,7 +2572,7 @@ impl Session { .network_proxy .as_ref() .map(StartedNetworkProxy::proxy), - Arc::clone(&self.services.environment), + self.services.environment.clone(), sub_id, Arc::clone(&self.js_repl), skills_outcome, @@ -3609,7 +3639,11 @@ impl Session { state.reference_context_item(), state.previous_turn_settings(), state.session_configuration.collaboration_mode.clone(), - state.session_configuration.base_instructions.clone(), + state + .session_configuration + .base_instructions + .clone() + .unwrap_or_default(), state.session_configuration.session_source.clone(), ) }; @@ -3850,7 +3884,13 @@ impl Session { pub(crate) async fn recompute_token_usage(&self, turn_context: &TurnContext) { let history = self.clone_history().await; - let base_instructions = self.get_base_instructions().await; + let empty_base_instructions = BaseInstructions { + text: String::new(), + }; + let base_instructions = self + .get_base_instructions() + .await + .unwrap_or(empty_base_instructions); let Some(estimated_total_tokens) = history.estimate_token_count_with_base_instructions(&base_instructions) else { @@ -4762,8 +4802,8 @@ mod handlers { use crate::tasks::UserShellCommandMode; use crate::tasks::UserShellCommandTask; use crate::tasks::execute_user_shell_command; - use codex_mcp::mcp::auth::compute_auth_statuses; - use codex_mcp::mcp::collect_mcp_snapshot_from_manager; + use codex_mcp::collect_mcp_snapshot_from_manager; + use codex_mcp::compute_auth_statuses; use codex_protocol::protocol::CodexErrorInfo; use codex_protocol::protocol::ErrorEvent; use codex_protocol::protocol::Event; @@ -5614,6 +5654,7 @@ async fn spawn_review_thread( ) .with_web_search_config(/*web_search_config*/ None) .with_allow_login_shell(config.permissions.allow_login_shell) + .with_has_environment(parent_turn_context.environment.is_some()) .with_agent_type_description(crate::agent::role::spawn_tool_spec::build( &config.agent_roles, )); @@ -5672,7 +5713,7 @@ async fn spawn_review_thread( reasoning_effort, 
reasoning_summary, session_source, - environment: Arc::clone(&parent_turn_context.environment), + environment: parent_turn_context.environment.clone(), tools_config, features: parent_turn_context.features.clone(), ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), @@ -6543,7 +6584,7 @@ pub(crate) fn build_prompt( input: Vec, router: &ToolRouter, turn_context: &TurnContext, - base_instructions: BaseInstructions, + base_instructions: Option, ) -> Prompt { let deferred_dynamic_tools = turn_context .dynamic_tools diff --git a/codex-rs/core/src/codex/rollout_reconstruction_tests.rs b/codex-rs/core/src/codex/rollout_reconstruction_tests.rs index 5dd4f60e14..753244ac2b 100644 --- a/codex-rs/core/src/codex/rollout_reconstruction_tests.rs +++ b/codex-rs/core/src/codex/rollout_reconstruction_tests.rs @@ -128,6 +128,7 @@ async fn record_initial_history_resumed_hydrates_previous_turn_settings_from_lif RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -145,6 +146,8 @@ async fn record_initial_history_resumed_hydrates_previous_turn_settings_from_lif codex_protocol::protocol::TurnCompleteEvent { turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), ]; @@ -190,6 +193,7 @@ async fn reconstruct_history_rollback_keeps_history_and_metadata_in_sync_for_com RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -209,11 +213,14 @@ async fn reconstruct_history_rollback_keeps_history_and_metadata_in_sync_for_com codex_protocol::protocol::TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( 
codex_protocol::protocol::TurnStartedEvent { turn_id: rolled_back_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -233,6 +240,8 @@ async fn reconstruct_history_rollback_keeps_history_and_metadata_in_sync_for_com codex_protocol::protocol::TurnCompleteEvent { turn_id: rolled_back_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::ThreadRolledBack( @@ -280,6 +289,7 @@ async fn reconstruct_history_rollback_keeps_history_and_metadata_in_sync_for_inc RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -299,11 +309,14 @@ async fn reconstruct_history_rollback_keeps_history_and_metadata_in_sync_for_inc codex_protocol::protocol::TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: incomplete_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -365,6 +378,7 @@ async fn reconstruct_history_rollback_skips_non_user_turns_for_history_and_metad RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -384,11 +398,14 @@ async fn reconstruct_history_rollback_skips_non_user_turns_for_history_and_metad codex_protocol::protocol::TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: second_turn_id.clone(), + 
started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -407,11 +424,14 @@ async fn reconstruct_history_rollback_skips_non_user_turns_for_history_and_metad codex_protocol::protocol::TurnCompleteEvent { turn_id: second_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: standalone_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -421,6 +441,8 @@ async fn reconstruct_history_rollback_skips_non_user_turns_for_history_and_metad codex_protocol::protocol::TurnCompleteEvent { turn_id: standalone_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::ThreadRolledBack( @@ -471,6 +493,7 @@ async fn reconstruct_history_rollback_counts_inter_agent_assistant_turns() { RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -490,11 +513,14 @@ async fn reconstruct_history_rollback_counts_inter_agent_assistant_turns() { codex_protocol::protocol::TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: assistant_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -506,6 +532,8 @@ async fn reconstruct_history_rollback_counts_inter_agent_assistant_turns() { codex_protocol::protocol::TurnCompleteEvent { turn_id: assistant_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::ThreadRolledBack( @@ -551,6 +579,7 @@ 
async fn reconstruct_history_rollback_clears_history_and_metadata_when_exceeding RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: only_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -570,6 +599,8 @@ async fn reconstruct_history_rollback_clears_history_and_metadata_when_exceeding codex_protocol::protocol::TurnCompleteEvent { turn_id: only_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::ThreadRolledBack( @@ -599,6 +630,7 @@ async fn record_initial_history_resumed_rollback_skips_only_user_turns() { RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: user_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -616,12 +648,15 @@ async fn record_initial_history_resumed_rollback_skips_only_user_turns() { codex_protocol::protocol::TurnCompleteEvent { turn_id: user_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), // Standalone task turn (no UserMessage) should not consume rollback skips. 
RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: standalone_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -630,6 +665,8 @@ async fn record_initial_history_resumed_rollback_skips_only_user_turns() { codex_protocol::protocol::TurnCompleteEvent { turn_id: standalone_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::ThreadRolledBack( @@ -663,6 +700,7 @@ async fn record_initial_history_resumed_rollback_drops_incomplete_user_turn_comp RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -680,11 +718,14 @@ async fn record_initial_history_resumed_rollback_drops_incomplete_user_turn_comp codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: incomplete_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -815,6 +856,7 @@ async fn reconstruct_history_legacy_compaction_without_replacement_history_clear RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: current_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -832,6 +874,8 @@ async fn reconstruct_history_legacy_compaction_without_replacement_history_clear codex_protocol::protocol::TurnCompleteEvent { turn_id: current_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), ]; @@ -876,6 +920,7 @@ async fn 
record_initial_history_resumed_turn_context_after_compaction_reestablis RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -898,6 +943,8 @@ async fn record_initial_history_resumed_turn_context_after_compaction_reestablis codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), ]; @@ -979,6 +1026,7 @@ async fn record_initial_history_resumed_aborted_turn_without_id_clears_active_tu RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -996,11 +1044,14 @@ async fn record_initial_history_resumed_aborted_turn_without_id_clears_active_tu codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: aborted_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1017,6 +1068,8 @@ async fn record_initial_history_resumed_aborted_turn_without_id_clears_active_tu codex_protocol::protocol::TurnAbortedEvent { turn_id: None, reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }, )), RolloutItem::Compacted(CompactedItem { @@ -1080,6 +1133,7 @@ async fn record_initial_history_resumed_unmatched_abort_preserves_active_turn_fo RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1097,11 
+1151,14 @@ async fn record_initial_history_resumed_unmatched_abort_preserves_active_turn_fo codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: current_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1118,6 +1175,8 @@ async fn record_initial_history_resumed_unmatched_abort_preserves_active_turn_fo codex_protocol::protocol::TurnAbortedEvent { turn_id: Some(unmatched_abort_turn_id), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }, )), RolloutItem::TurnContext(current_context_item.clone()), @@ -1125,6 +1184,8 @@ async fn record_initial_history_resumed_unmatched_abort_preserves_active_turn_fo codex_protocol::protocol::TurnCompleteEvent { turn_id: current_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), ]; @@ -1187,6 +1248,7 @@ async fn record_initial_history_resumed_trailing_incomplete_turn_compaction_clea RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1204,11 +1266,14 @@ async fn record_initial_history_resumed_trailing_incomplete_turn_compaction_clea codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: incomplete_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1258,6 +1323,7 @@ async fn record_initial_history_resumed_trailing_incomplete_turn_preserves_turn_ 
RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: current_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1332,6 +1398,7 @@ async fn record_initial_history_resumed_replaced_incomplete_compacted_turn_clear RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: previous_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1349,11 +1416,14 @@ async fn record_initial_history_resumed_replaced_incomplete_compacted_turn_clear codex_protocol::protocol::TurnCompleteEvent { turn_id: previous_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: compacted_incomplete_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1375,6 +1445,7 @@ async fn record_initial_history_resumed_replaced_incomplete_compacted_turn_clear RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: replacing_turn_id, + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index 93d3eba021..aef0a92d76 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -78,8 +78,8 @@ pub(crate) async fn run_codex_thread_interactive( config, auth_manager, models_manager, - environment_manager: Arc::new(EnvironmentManager::new( - parent_ctx.environment.exec_server_url().map(str::to_owned), + environment_manager: Arc::new(EnvironmentManager::from_environment( + parent_ctx.environment.as_deref(), )), skills_manager: Arc::clone(&parent_session.services.skills_manager), plugins_manager: 
Arc::clone(&parent_session.services.plugins_manager), @@ -528,64 +528,60 @@ async fn handle_patch_approval( } = event; let approval_id = call_id.clone(); let guardian_decision = if routes_approval_to_guardian(parent_ctx) { - let maybe_files = changes + let files = changes .keys() - .map(|path| parent_ctx.cwd.join(path).ok()) - .collect::>>(); - if let Some(files) = maybe_files { - let review_cancel = cancel_token.child_token(); - let patch = changes - .iter() - .map(|(path, change)| match change { - codex_protocol::protocol::FileChange::Add { content } => { - format!("*** Add File: {}\n{}", path.display(), content) + .map(|path| parent_ctx.cwd.join(path)) + .collect::>(); + let review_cancel = cancel_token.child_token(); + let patch = changes + .iter() + .map(|(path, change)| match change { + codex_protocol::protocol::FileChange::Add { content } => { + format!("*** Add File: {}\n{}", path.display(), content) + } + codex_protocol::protocol::FileChange::Delete { content } => { + format!("*** Delete File: {}\n{}", path.display(), content) + } + codex_protocol::protocol::FileChange::Update { + unified_diff, + move_path, + } => { + if let Some(move_path) = move_path { + format!( + "*** Update File: {}\n*** Move to: {}\n{}", + path.display(), + move_path.display(), + unified_diff + ) + } else { + format!("*** Update File: {}\n{}", path.display(), unified_diff) } - codex_protocol::protocol::FileChange::Delete { content } => { - format!("*** Delete File: {}\n{}", path.display(), content) - } - codex_protocol::protocol::FileChange::Update { - unified_diff, - move_path, - } => { - if let Some(move_path) = move_path { - format!( - "*** Update File: {}\n*** Move to: {}\n{}", - path.display(), - move_path.display(), - unified_diff - ) - } else { - format!("*** Update File: {}\n{}", path.display(), unified_diff) - } - } - }) - .collect::>() - .join("\n"); - let review_rx = spawn_guardian_review( - Arc::clone(parent_session), - Arc::clone(parent_ctx), - 
GuardianApprovalRequest::ApplyPatch { - id: approval_id.clone(), - cwd: parent_ctx.cwd.to_path_buf(), - files, - patch, - }, - reason.clone(), - review_cancel.clone(), - ); - Some( - await_approval_with_cancel( - async move { review_rx.await.unwrap_or_default() }, - parent_session, - &approval_id, - cancel_token, - Some(&review_cancel), - ) - .await, + } + }) + .collect::>() + .join("\n"); + let review_rx = spawn_guardian_review( + Arc::clone(parent_session), + Arc::clone(parent_ctx), + GuardianApprovalRequest::ApplyPatch { + id: approval_id.clone(), + cwd: parent_ctx.cwd.to_path_buf(), + files, + patch, + }, + reason.clone(), + review_cancel.clone(), + ); + Some( + await_approval_with_cancel( + async move { review_rx.await.unwrap_or_default() }, + parent_session, + &approval_id, + cancel_token, + Some(&review_cancel), ) - } else { - None - } + .await, + ) } else { None }; diff --git a/codex-rs/core/src/codex_delegate_tests.rs b/codex-rs/core/src/codex_delegate_tests.rs index a10b0c33e0..fbdeb765a8 100644 --- a/codex-rs/core/src/codex_delegate_tests.rs +++ b/codex-rs/core/src/codex_delegate_tests.rs @@ -53,6 +53,8 @@ async fn forward_events_cancelled_while_send_blocked_shuts_down_delegate() { msg: EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }) .await diff --git a/codex-rs/core/src/codex_tests.rs b/codex-rs/core/src/codex_tests.rs index 85404d4d56..29fe9df89c 100644 --- a/codex-rs/core/src/codex_tests.rs +++ b/codex-rs/core/src/codex_tests.rs @@ -15,7 +15,7 @@ use crate::tools::format_exec_output_str; use codex_features::Features; use codex_login::CodexAuth; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::ToolInfo; use codex_model_provider_info::ModelProviderInfo; use codex_models_manager::bundled_models_response; use codex_models_manager::model_info; @@ -36,9 +36,9 @@ use codex_protocol::request_permissions::PermissionGrantScope; 
use codex_protocol::request_permissions::RequestPermissionProfile; use tracing::Span; +use crate::RolloutRecorderParams; use crate::rollout::policy::EventPersistenceMode; use crate::rollout::recorder::RolloutRecorder; -use crate::rollout::recorder::RolloutRecorderParams; use crate::state::TaskKind; use crate::tasks::SessionTask; use crate::tasks::SessionTaskContext; @@ -235,13 +235,19 @@ async fn interrupting_regular_turn_waiting_on_startup_prewarm_emits_turn_aborted .await .expect("expected turn aborted event") .expect("channel open"); - assert!(matches!( - second.msg, - EventMsg::TurnAborted(TurnAbortedEvent { - turn_id: Some(turn_id), - reason: TurnAbortReason::Interrupted, - }) if turn_id == tc.sub_id - )); + let EventMsg::TurnAborted(TurnAbortedEvent { + turn_id, + reason, + completed_at, + duration_ms, + }) = second.msg + else { + panic!("expected turn aborted event"); + }; + assert_eq!(turn_id, Some(tc.sub_id.clone())); + assert_eq!(reason, TurnAbortReason::Interrupted); + assert!(completed_at.is_some()); + assert!(duration_ms.is_some()); } fn test_model_client_session() -> crate::client::ModelClientSession { @@ -249,6 +255,7 @@ fn test_model_client_session() -> crate::client::ModelClientSession { /*auth_manager*/ None, ThreadId::try_from("00000000-0000-4000-8000-000000000001") .expect("test thread id should be valid"), + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), ModelProviderInfo::create_openai_provider(/* base_url */ /*base_url*/ None), codex_protocol::protocol::SessionSource::Exec, /*model_verbosity*/ None, @@ -585,11 +592,15 @@ async fn get_base_instructions_no_user_content() { { let mut state = session.state.lock().await; - state.session_configuration.base_instructions = model_info.base_instructions.clone(); + state.session_configuration.base_instructions = + Some(model_info.base_instructions.clone()); } let base_instructions = session.get_base_instructions().await; - assert_eq!(base_instructions.text, 
model_info.base_instructions); + assert_eq!( + base_instructions.expect("base instructions").text, + model_info.base_instructions + ); } } @@ -1085,7 +1096,7 @@ async fn recompute_token_usage_uses_session_base_instructions() { let override_instructions = "SESSION_OVERRIDE_INSTRUCTIONS_ONLY".repeat(120); { let mut state = session.state.lock().await; - state.session_configuration.base_instructions = override_instructions.clone(); + state.session_configuration.base_instructions = Some(override_instructions.clone()); } let item = user_message("hello"); @@ -1300,6 +1311,7 @@ async fn record_initial_history_forked_hydrates_previous_turn_settings() { RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1317,6 +1329,8 @@ async fn record_initial_history_forked_hydrates_previous_turn_settings() { codex_protocol::protocol::TurnCompleteEvent { turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, }, )), ]; @@ -1481,6 +1495,7 @@ async fn thread_rollback_recomputes_previous_turn_settings_and_reference_context RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1499,10 +1514,13 @@ async fn thread_rollback_recomputes_previous_turn_settings_and_reference_context RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: rolled_back_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1521,6 +1539,8 @@ async fn 
thread_rollback_recomputes_previous_turn_settings_and_reference_context RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: rolled_back_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]) .await; @@ -1579,6 +1599,7 @@ async fn thread_rollback_restores_cleared_reference_context_item_after_compactio RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: first_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1595,10 +1616,13 @@ async fn thread_rollback_restores_cleared_reference_context_item_after_compactio RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: first_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: compact_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1610,10 +1634,13 @@ async fn thread_rollback_restores_cleared_reference_context_item_after_compactio RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: compact_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: rolled_back_turn_id.clone(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1634,6 +1661,8 @@ async fn thread_rollback_restores_cleared_reference_context_item_after_compactio RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: rolled_back_turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]) .await; @@ -1661,6 +1690,7 @@ async fn thread_rollback_persists_marker_and_replays_cumulatively() { RolloutItem::EventMsg(EventMsg::TurnStarted( 
codex_protocol::protocol::TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1677,10 +1707,13 @@ async fn thread_rollback_persists_marker_and_replays_cumulatively() { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: "turn-2".to_string(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1697,10 +1730,13 @@ async fn thread_rollback_persists_marker_and_replays_cumulatively() { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-2".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), RolloutItem::EventMsg(EventMsg::TurnStarted( codex_protocol::protocol::TurnStartedEvent { turn_id: "turn-3".to_string(), + started_at: None, model_context_window: Some(128_000), collaboration_mode_kind: ModeKind::Default, }, @@ -1717,6 +1753,8 @@ async fn thread_rollback_persists_marker_and_replays_cumulatively() { RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-3".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, })), ]) .await; @@ -1816,13 +1854,14 @@ async fn set_rate_limits_retains_previous_credits() { collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| 
Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -1918,13 +1957,14 @@ async fn set_rate_limits_updates_plan_type_when_present() { collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -2189,7 +2229,8 @@ async fn attach_rollout_recorder(session: &Arc) -> PathBuf { ThreadId::default(), /*forked_from_id*/ None, SessionSource::Exec, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), @@ -2267,13 +2308,14 @@ pub(crate) async fn make_session_configuration_for_tests() -> SessionConfigurati collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), 
approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -2470,10 +2512,7 @@ async fn session_configuration_apply_rederives_legacy_file_system_policy_on_cwd_ #[tokio::test] async fn session_update_settings_keeps_runtime_cwds_absolute() { let (session, turn_context) = make_session_and_context().await; - let updated_cwd = turn_context - .cwd - .join("project") - .expect("resolve project dir"); + let updated_cwd = turn_context.cwd.join("project"); std::fs::create_dir_all(updated_cwd.as_path()).expect("create project dir"); session @@ -2533,13 +2572,14 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() { collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -2579,14 +2619,16 @@ async fn session_new_fails_when_zsh_fork_enabled_without_zsh_path() { agent_status_tx, InitialHistory::New, SessionSource::Exec, - Arc::new(codex_exec_server::EnvironmentManager::new( - /*exec_server_url*/ None, - )), skills_manager, plugins_manager, mcp_manager, Arc::new(SkillsWatcher::noop()), AgentControl::default(), + Some(Arc::new( + codex_exec_server::Environment::create(/*exec_server_url*/ None) + .await + .expect("create environment"), + )), ) .await; @@ -2634,13 +2676,14 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { collaboration_mode, 
model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -2727,6 +2770,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { model_client: ModelClient::new( Some(auth_manager.clone()), conversation_id, + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), session_configuration.provider.clone(), session_configuration.session_source.clone(), config.model_verbosity, @@ -2737,7 +2781,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { code_mode_service: crate::tools::code_mode::CodeModeService::new( config.js_repl_node_path.clone(), ), - environment: Arc::clone(&environment), + environment: Some(Arc::clone(&environment)), }; let js_repl = Arc::new(JsReplHandle::with_node_path( config.js_repl_node_path.clone(), @@ -2764,7 +2808,7 @@ pub(crate) async fn make_session_and_context() -> (Session, TurnContext) { model_info, &models_manager, /*network*/ None, - environment, + Some(environment), "turn_id".to_string(), Arc::clone(&js_repl), skills_outcome, @@ -3474,13 +3518,14 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( collaboration_mode, model_reasoning_summary: config.model_reasoning_summary, developer_instructions: config.developer_instructions.clone(), + developer_instructions_override: config.developer_instructions_override.clone(), user_instructions: 
config.user_instructions.clone(), service_tier: None, personality: config.personality, base_instructions: config .base_instructions .clone() - .unwrap_or_else(|| model_info.get_model_instructions(config.personality)), + .unwrap_or_else(|| Some(model_info.get_model_instructions(config.personality))), compact_prompt: config.compact_prompt.clone(), approval_policy: config.permissions.approval_policy.clone(), approvals_reviewer: config.approvals_reviewer, @@ -3567,6 +3612,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( model_client: ModelClient::new( Some(Arc::clone(&auth_manager)), conversation_id, + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), session_configuration.provider.clone(), session_configuration.session_source.clone(), config.model_verbosity, @@ -3577,7 +3623,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( code_mode_service: crate::tools::code_mode::CodeModeService::new( config.js_repl_node_path.clone(), ), - environment: Arc::clone(&environment), + environment: Some(Arc::clone(&environment)), }; let js_repl = Arc::new(JsReplHandle::with_node_path( config.js_repl_node_path.clone(), @@ -3604,7 +3650,7 @@ pub(crate) async fn make_session_and_context_with_dynamic_tools_and_rx( model_info, &models_manager, /*network*/ None, - environment, + Some(environment), "turn_id".to_string(), Arc::clone(&js_repl), skills_outcome, @@ -4050,18 +4096,13 @@ async fn handle_output_item_done_records_image_save_history_message() { let image_output_dir = image_output_path .parent() .expect("generated image path should have a parent"); - let save_message: ResponseItem = DeveloperInstructions::new(format!( - "Generated images are saved to {} as {} by default.", + let image_message: ResponseItem = DeveloperInstructions::new(format!( + "Generated images are saved to {} as {} by default.\nIf you need to use a generated image at another path, copy it and leave the original in place unless the user 
explicitly asks you to delete it.", image_output_dir.display(), image_output_path.display(), )) .into(); - let copy_message: ResponseItem = DeveloperInstructions::new( - "If you need to use a generated image at another path, copy it and leave the original in place unless the user explicitly asks you to delete it." - .to_string(), - ) - .into(); - assert_eq!(history.raw_items(), &[save_message, copy_message, item]); + assert_eq!(history.raw_items(), &[image_message, item]); assert_eq!( std::fs::read(&expected_saved_path).expect("saved file"), b"foo" @@ -4229,7 +4270,8 @@ async fn record_context_updates_and_set_reference_context_item_persists_baseline ThreadId::default(), /*forked_from_id*/ None, SessionSource::Exec, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), @@ -4326,7 +4368,8 @@ async fn record_context_updates_and_set_reference_context_item_persists_full_rei ThreadId::default(), /*forked_from_id*/ None, SessionSource::Exec, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), @@ -4624,6 +4667,7 @@ async fn task_finish_emits_turn_item_lifecycle_for_leftover_pending_user_input() EventMsg::TurnComplete(TurnCompleteEvent { turn_id, last_agent_message: None, + .. 
}) if turn_id == tc.sub_id )); } diff --git a/codex-rs/core/src/codex_thread.rs b/codex-rs/core/src/codex_thread.rs index 252b1b6ae3..9727cc208a 100644 --- a/codex-rs/core/src/codex_thread.rs +++ b/codex-rs/core/src/codex_thread.rs @@ -22,6 +22,7 @@ use codex_protocol::protocol::Submission; use codex_protocol::protocol::TokenUsage; use codex_protocol::protocol::W3cTraceContext; use codex_protocol::user_input::UserInput; +use rmcp::model::ReadResourceRequestParams; use std::path::PathBuf; use tokio::sync::Mutex; use tokio::sync::watch; @@ -199,6 +200,26 @@ impl CodexThread { self.codex.thread_config_snapshot().await } + pub async fn read_mcp_resource( + &self, + server: &str, + uri: &str, + ) -> anyhow::Result { + let result = self + .codex + .session + .read_resource( + server, + ReadResourceRequestParams { + meta: None, + uri: uri.to_string(), + }, + ) + .await?; + + Ok(serde_json::to_value(result)?) + } + pub fn enabled(&self, feature: Feature) -> bool { self.codex.enabled(feature) } diff --git a/codex-rs/core/src/compact.rs b/codex-rs/core/src/compact.rs index 27cf069268..8300dc650c 100644 --- a/codex-rs/core/src/compact.rs +++ b/codex-rs/core/src/compact.rs @@ -74,6 +74,7 @@ pub(crate) async fn run_compact_task( ) -> CodexResult<()> { let start_event = EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_context.sub_id.clone(), + started_at: turn_context.turn_timing_state.started_at_unix_secs().await, model_context_window: turn_context.model_context_window(), collaboration_mode_kind: turn_context.collaboration_mode.mode, }); diff --git a/codex-rs/core/src/compact_remote.rs b/codex-rs/core/src/compact_remote.rs index 118d460b8e..640ca3c1f8 100644 --- a/codex-rs/core/src/compact_remote.rs +++ b/codex-rs/core/src/compact_remote.rs @@ -40,6 +40,7 @@ pub(crate) async fn run_remote_compact_task( ) -> CodexResult<()> { let start_event = EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_context.sub_id.clone(), + started_at: 
turn_context.turn_timing_state.started_at_unix_secs().await, model_context_window: turn_context.model_context_window(), collaboration_mode_kind: turn_context.collaboration_mode.mode, }); @@ -75,10 +76,16 @@ async fn run_remote_compact_task_inner_impl( .await; let mut history = sess.clone_history().await; let base_instructions = sess.get_base_instructions().await; + let token_count_base_instructions = + base_instructions + .clone() + .unwrap_or_else(|| BaseInstructions { + text: String::new(), + }); let deleted_items = trim_function_call_history_to_fit_context_window( &mut history, turn_context.as_ref(), - &base_instructions, + &token_count_base_instructions, ); if deleted_items > 0 { info!( @@ -126,8 +133,13 @@ async fn run_remote_compact_task_inner_impl( ) .or_else(|err| async { let total_usage_breakdown = sess.get_total_token_usage_breakdown().await; + let base_instruction_text = prompt + .base_instructions + .as_ref() + .map(|base_instructions| base_instructions.text.as_str()) + .unwrap_or(""); let compact_request_log_data = - build_compact_request_log_data(&prompt.input, &prompt.base_instructions.text); + build_compact_request_log_data(&prompt.input, base_instruction_text); log_remote_compact_failure( turn_context, &compact_request_log_data, diff --git a/codex-rs/core/src/config/agent_roles.rs b/codex-rs/core/src/config/agent_roles.rs index c527435e92..24d26ebf47 100644 --- a/codex-rs/core/src/config/agent_roles.rs +++ b/codex-rs/core/src/config/agent_roles.rs @@ -1,9 +1,9 @@ use super::AgentRoleConfig; -use super::AgentRoleToml; -use super::AgentsToml; -use super::ConfigToml; use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigLayerStackOrdering; +use codex_config::config_toml::AgentRoleToml; +use codex_config::config_toml::AgentsToml; +use codex_config::config_toml::ConfigToml; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use serde::Deserialize; diff --git 
a/codex-rs/core/src/config/config_tests.rs b/codex-rs/core/src/config/config_tests.rs index 13e60ecb71..1201537da9 100644 --- a/codex-rs/core/src/config/config_tests.rs +++ b/codex-rs/core/src/config/config_tests.rs @@ -5,6 +5,24 @@ use crate::config_loader::RequirementSource; use crate::plugins::PluginsManager; use assert_matches::assert_matches; use codex_config::CONFIG_TOML_FILE; +use codex_config::config_toml::AgentRoleToml; +use codex_config::config_toml::AgentsToml; +use codex_config::config_toml::ConfigToml; +use codex_config::config_toml::ProjectConfig; +use codex_config::config_toml::RealtimeAudioConfig; +use codex_config::config_toml::RealtimeConfig; +use codex_config::config_toml::RealtimeToml; +use codex_config::config_toml::RealtimeWsMode; +use codex_config::config_toml::RealtimeWsVersion; +use codex_config::config_toml::ToolsToml; +use codex_config::permissions_toml::FilesystemPermissionToml; +use codex_config::permissions_toml::FilesystemPermissionsToml; +use codex_config::permissions_toml::NetworkDomainPermissionToml; +use codex_config::permissions_toml::NetworkDomainPermissionsToml; +use codex_config::permissions_toml::NetworkToml; +use codex_config::permissions_toml::PermissionProfileToml; +use codex_config::permissions_toml::PermissionsToml; +use codex_config::profile_toml::ConfigProfile; use codex_config::types::AppToolApproval; use codex_config::types::ApprovalsReviewer; use codex_config::types::BundledSkillsConfig; @@ -17,9 +35,14 @@ use codex_config::types::MemoriesToml; use codex_config::types::ModelAvailabilityNuxConfig; use codex_config::types::NotificationMethod; use codex_config::types::Notifications; +use codex_config::types::SandboxWorkspaceWrite; +use codex_config::types::SkillsConfig; use codex_config::types::ToolSuggestDiscoverableType; +use codex_config::types::Tui; use codex_features::Feature; use codex_features::FeaturesToml; +use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; +use 
codex_model_provider_info::OLLAMA_OSS_PROVIDER_ID; use codex_model_provider_info::WireApi; use codex_models_manager::bundled_models_response; use codex_protocol::permissions::FileSystemAccessMode; @@ -28,6 +51,7 @@ use codex_protocol::permissions::FileSystemSandboxEntry; use codex_protocol::permissions::FileSystemSandboxPolicy; use codex_protocol::permissions::FileSystemSpecialPath; use codex_protocol::permissions::NetworkSandboxPolicy; +use codex_protocol::protocol::ReadOnlyAccess; use serde::Deserialize; use tempfile::tempdir; @@ -4482,6 +4506,7 @@ fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { experimental_realtime_ws_startup_context: None, base_instructions: None, developer_instructions: None, + developer_instructions_override: None, guardian_developer_instructions: None, include_permissions_instructions: true, include_apps_instructions: true, @@ -4627,6 +4652,7 @@ fn test_precedence_fixture_with_gpt3_profile() -> std::io::Result<()> { experimental_realtime_ws_startup_context: None, base_instructions: None, developer_instructions: None, + developer_instructions_override: None, guardian_developer_instructions: None, include_permissions_instructions: true, include_apps_instructions: true, @@ -4770,6 +4796,7 @@ fn test_precedence_fixture_with_zdr_profile() -> std::io::Result<()> { experimental_realtime_ws_startup_context: None, base_instructions: None, developer_instructions: None, + developer_instructions_override: None, guardian_developer_instructions: None, include_permissions_instructions: true, include_apps_instructions: true, @@ -4899,6 +4926,7 @@ fn test_precedence_fixture_with_gpt5_profile() -> std::io::Result<()> { experimental_realtime_ws_startup_context: None, base_instructions: None, developer_instructions: None, + developer_instructions_override: None, guardian_developer_instructions: None, include_permissions_instructions: true, include_apps_instructions: true, diff --git a/codex-rs/core/src/config/managed_features.rs 
b/codex-rs/core/src/config/managed_features.rs index 44daa241cd..cbad012c6c 100644 --- a/codex-rs/core/src/config/managed_features.rs +++ b/codex-rs/core/src/config/managed_features.rs @@ -8,8 +8,8 @@ use codex_config::FeatureRequirementsToml; use codex_config::RequirementSource; use codex_config::Sourced; -use crate::config::ConfigToml; -use crate::config::profile::ConfigProfile; +use codex_config::config_toml::ConfigToml; +use codex_config::profile_toml::ConfigProfile; use codex_features::Feature; use codex_features::FeatureConfigSource; use codex_features::FeatureOverrides; diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index d83fc87371..3e9626866b 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -22,49 +22,42 @@ use crate::unified_exec::MIN_EMPTY_YIELD_TIME_MS; use crate::windows_sandbox::WindowsSandboxLevelExt; use crate::windows_sandbox::resolve_windows_sandbox_mode; use crate::windows_sandbox::resolve_windows_sandbox_private_desktop; -use codex_app_server_protocol::Tools; -use codex_app_server_protocol::UserSavedConfig; +use codex_config::config_toml::ConfigToml; +use codex_config::config_toml::ProjectConfig; +use codex_config::config_toml::RealtimeAudioConfig; +use codex_config::config_toml::RealtimeConfig; +use codex_config::config_toml::validate_model_providers; +use codex_config::profile_toml::ConfigProfile; use codex_config::types::ApprovalsReviewer; -use codex_config::types::AppsConfigToml; +use codex_config::types::AuthCredentialsStoreMode; use codex_config::types::DEFAULT_OTEL_ENVIRONMENT; use codex_config::types::History; use codex_config::types::McpServerConfig; use codex_config::types::McpServerDisabledReason; use codex_config::types::McpServerTransportConfig; use codex_config::types::MemoriesConfig; -use codex_config::types::MemoriesToml; use codex_config::types::ModelAvailabilityNuxConfig; use codex_config::types::Notice; use codex_config::types::NotificationMethod; use 
codex_config::types::Notifications; +use codex_config::types::OAuthCredentialsStoreMode; use codex_config::types::OtelConfig; use codex_config::types::OtelConfigToml; use codex_config::types::OtelExporterKind; -use codex_config::types::PluginConfig; -use codex_config::types::SandboxWorkspaceWrite; use codex_config::types::ShellEnvironmentPolicy; -use codex_config::types::ShellEnvironmentPolicyToml; -use codex_config::types::SkillsConfig; use codex_config::types::ToolSuggestConfig; use codex_config::types::ToolSuggestDiscoverable; -use codex_config::types::Tui; use codex_config::types::UriBasedFileOpener; use codex_config::types::WindowsSandboxModeToml; -use codex_config::types::WindowsToml; use codex_features::Feature; use codex_features::FeatureConfigSource; use codex_features::FeatureOverrides; use codex_features::Features; -use codex_features::FeaturesToml; -use codex_git_utils::resolve_root_git_project_for_trust; -use codex_login::AuthCredentialsStoreMode; -use codex_mcp::mcp::McpConfig; +use codex_login::AuthManagerConfig; +use codex_mcp::McpConfig; use codex_model_provider_info::LEGACY_OLLAMA_CHAT_PROVIDER_ID; -use codex_model_provider_info::LMSTUDIO_OSS_PROVIDER_ID; use codex_model_provider_info::ModelProviderInfo; use codex_model_provider_info::OLLAMA_CHAT_PROVIDER_REMOVED_ERROR; -use codex_model_provider_info::OLLAMA_OSS_PROVIDER_ID; -use codex_model_provider_info::OPENAI_PROVIDER_ID; use codex_model_provider_info::built_in_model_providers; use codex_models_manager::ModelsManagerConfig; use codex_protocol::config_types::AltScreenMode; @@ -77,22 +70,16 @@ use codex_protocol::config_types::TrustLevel; use codex_protocol::config_types::Verbosity; use codex_protocol::config_types::WebSearchConfig; use codex_protocol::config_types::WebSearchMode; -use codex_protocol::config_types::WebSearchToolConfig; use codex_protocol::config_types::WindowsSandboxLevel; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::openai_models::ReasoningEffort; use 
codex_protocol::permissions::FileSystemSandboxPolicy; use codex_protocol::permissions::NetworkSandboxPolicy; use codex_protocol::protocol::AskForApproval; -use codex_protocol::protocol::ReadOnlyAccess; use codex_protocol::protocol::SandboxPolicy; -use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; -use schemars::JsonSchema; use serde::Deserialize; -use serde::Deserializer; -use serde::Serialize; use std::collections::BTreeMap; use std::collections::HashMap; use std::io::ErrorKind; @@ -102,7 +89,6 @@ use std::path::PathBuf; use crate::config::permissions::compile_permission_profile; use crate::config::permissions::get_readable_roots_required_for_codex_runtime; use crate::config::permissions::network_proxy_config_from_profile_network; -use crate::config::profile::ConfigProfile; use codex_network_proxy::NetworkProxyConfig; use toml::Value as TomlValue; use toml_edit::DocumentMut; @@ -112,9 +98,9 @@ pub mod edit; mod managed_features; mod network_proxy_spec; mod permissions; -pub mod profile; -pub mod schema; -pub mod service; +#[cfg(test)] +mod schema; +pub(crate) mod service; pub use codex_config::Constrained; pub use codex_config::ConstraintError; pub use codex_config::ConstraintResult; @@ -123,16 +109,6 @@ pub use codex_sandboxing::system_bwrap_warning; pub use managed_features::ManagedFeatures; pub use network_proxy_spec::NetworkProxySpec; pub use network_proxy_spec::StartedNetworkProxy; -pub use permissions::FilesystemPermissionToml; -pub use permissions::FilesystemPermissionsToml; -pub use permissions::NetworkDomainPermissionToml; -pub use permissions::NetworkDomainPermissionsToml; -pub use permissions::NetworkToml; -pub use permissions::NetworkUnixSocketPermissionToml; -pub use permissions::NetworkUnixSocketPermissionsToml; -pub use permissions::PermissionProfileToml; -pub use permissions::PermissionsToml; -pub(crate) use 
permissions::overlay_network_domain_permissions; pub(crate) use permissions::resolve_permission_profile; pub use service::ConfigService; pub use service::ConfigServiceError; @@ -148,11 +124,6 @@ pub(crate) const DEFAULT_AGENT_MAX_DEPTH: i32 = 1; pub(crate) const DEFAULT_AGENT_JOB_MAX_RUNTIME_SECONDS: Option = None; pub const CONFIG_TOML_FILE: &str = "config.toml"; -const RESERVED_MODEL_PROVIDER_IDS: [&str; 3] = [ - OPENAI_PROVIDER_ID, - OLLAMA_OSS_PROVIDER_ID, - LMSTUDIO_OSS_PROVIDER_ID, -]; fn resolve_sqlite_home_env(resolved_cwd: &Path) -> Option { let raw = std::env::var(codex_state::SQLITE_HOME_ENV).ok()?; @@ -272,11 +243,15 @@ pub struct Config { pub user_instructions: Option, /// Base instructions override. - pub base_instructions: Option, + pub base_instructions: Option>, /// Developer instructions override injected as a separate message. pub developer_instructions: Option, + /// Explicit developer instructions override, preserving `null` as distinct + /// from a missing override. + pub developer_instructions_override: Option>, + /// Guardian-specific developer instructions override from requirements.toml. pub guardian_developer_instructions: Option, @@ -341,7 +316,7 @@ pub struct Config { /// Controls whether the TUI uses the terminal's alternate screen buffer. /// - /// This is the same `tui.alternate_screen` value from `config.toml` (see [`Tui`]). + /// This is the same `tui.alternate_screen` value from `config.toml`. /// - `auto` (default): Disable alternate screen in Zellij, enable elsewhere. /// - `always`: Always use alternate screen (original behavior). /// - `never`: Never use alternate screen (inline mode, preserves scrollback). 
@@ -593,6 +568,20 @@ pub struct Config { pub otel: codex_config::types::OtelConfig, } +impl AuthManagerConfig for Config { + fn codex_home(&self) -> PathBuf { + self.codex_home.clone() + } + + fn cli_auth_credentials_store_mode(&self) -> AuthCredentialsStoreMode { + self.cli_auth_credentials_store_mode + } + + fn forced_chatgpt_workspace_id(&self) -> Option { + self.forced_chatgpt_workspace_id.clone() + } +} + #[derive(Debug, Clone, Default)] pub struct ConfigBuilder { codex_home: Option, @@ -702,7 +691,7 @@ impl Config { model_context_window: self.model_context_window, model_auto_compact_token_limit: self.model_auto_compact_token_limit, tool_output_token_limit: self.tool_output_token_limit, - base_instructions: self.base_instructions.clone(), + base_instructions: self.base_instructions.clone().flatten(), personality_enabled: self.features.enabled(Feature::Personality), model_supports_reasoning_summaries: self.model_supports_reasoning_summaries, model_catalog: self.model_catalog.clone(), @@ -1100,26 +1089,7 @@ pub fn set_project_trust_level( /// Save the default OSS provider preference to config.toml pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::Result<()> { - // Validate that the provider is one of the known OSS providers - match provider { - LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => { - // Valid provider, continue - } - LEGACY_OLLAMA_CHAT_PROVIDER_ID => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - OLLAMA_CHAT_PROVIDER_REMOVED_ERROR, - )); - } - _ => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - format!( - "Invalid OSS provider '{provider}'. 
Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}" - ), - )); - } - } + codex_config::config_toml::validate_oss_provider(provider)?; use toml_edit::value; let edits = [ConfigEdit::SetPath { @@ -1133,452 +1103,15 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R .map_err(|err| std::io::Error::other(format!("failed to persist config.toml: {err}"))) } -/// Base config deserialized from ~/.codex/config.toml. -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct ConfigToml { - /// Optional override of model selection. - pub model: Option, - /// Review model override used by the `/review` feature. - pub review_model: Option, - - /// Provider to use from the model_providers map. - pub model_provider: Option, - - /// Size of the context window for the model, in tokens. - pub model_context_window: Option, - - /// Token usage threshold triggering auto-compaction of conversation history. - pub model_auto_compact_token_limit: Option, - - /// Default approval policy for executing commands. - pub approval_policy: Option, - - /// Configures who approval requests are routed to for review once they have - /// been escalated. This does not disable separate safety checks such as - /// ARC. - pub approvals_reviewer: Option, - - #[serde(default)] - pub shell_environment_policy: ShellEnvironmentPolicyToml, - - /// Whether the model may request a login shell for shell-based tools. - /// Default to `true` - /// - /// If `true`, the model may request a login shell (`login = true`), and - /// omitting `login` defaults to using a login shell. - /// If `false`, the model can never use a login shell: `login = true` - /// requests are rejected, and omitting `login` defaults to a non-login - /// shell. - pub allow_login_shell: Option, - - /// Sandbox mode to use. - pub sandbox_mode: Option, - - /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`. 
- pub sandbox_workspace_write: Option, - - /// Default named permissions profile to apply from the `[permissions]` - /// table. - pub default_permissions: Option, - - /// Named permissions profiles. - #[serde(default)] - pub permissions: Option, - - /// Optional external command to spawn for end-user notifications. - #[serde(default)] - pub notify: Option>, - - /// System instructions. - pub instructions: Option, - - /// Developer instructions inserted as a `developer` role message. - #[serde(default)] - pub developer_instructions: Option, - - /// Whether to inject the `` developer block. - pub include_permissions_instructions: Option, - - /// Whether to inject the `` developer block. - pub include_apps_instructions: Option, - - /// Whether to inject the `` user block. - pub include_environment_context: Option, - - /// Optional path to a file containing model instructions that will override - /// the built-in instructions for the selected model. Users are STRONGLY - /// DISCOURAGED from using this field, as deviating from the instructions - /// sanctioned by Codex will likely degrade model performance. - pub model_instructions_file: Option, - - /// Compact prompt used for history compaction. - pub compact_prompt: Option, - - /// Optional commit attribution text for commit message co-author trailers. - /// - /// Set to an empty string to disable automatic commit attribution. - pub commit_attribution: Option, - - /// When set, restricts ChatGPT login to a specific workspace identifier. - #[serde(default)] - pub forced_chatgpt_workspace_id: Option, - - /// When set, restricts the login mechanism users may use. - #[serde(default)] - pub forced_login_method: Option, - - /// Preferred backend for storing CLI auth credentials. - /// file (default): Use a file in the Codex home directory. - /// keyring: Use an OS-specific keyring service. - /// auto: Use the keyring if available, otherwise use a file. 
- #[serde(default)] - pub cli_auth_credentials_store: Option, - - /// Definition for MCP servers that Codex can reach out to for tool calls. - #[serde(default)] - // Uses the raw MCP input shape (custom deserialization) rather than `McpServerConfig`. - #[schemars(schema_with = "crate::config::schema::mcp_servers_schema")] - pub mcp_servers: HashMap, - - /// Preferred backend for storing MCP OAuth credentials. - /// keyring: Use an OS-specific keyring service. - /// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 - /// file: Use a file in the Codex home directory. - /// auto (default): Use the OS-specific keyring service if available, otherwise use a file. - #[serde(default)] - pub mcp_oauth_credentials_store: Option, - - /// Optional fixed port for the local HTTP callback server used during MCP OAuth login. - /// When unset, Codex will bind to an ephemeral port chosen by the OS. - pub mcp_oauth_callback_port: Option, - - /// Optional redirect URI to use during MCP OAuth login. - /// When set, this URI is used in the OAuth authorization request instead - /// of the local listener address. The local callback listener still binds - /// to 127.0.0.1 (using `mcp_oauth_callback_port` when provided). - pub mcp_oauth_callback_url: Option, - - /// User-defined provider entries that extend the built-in list. Built-in - /// IDs cannot be overridden. - #[serde(default, deserialize_with = "deserialize_model_providers")] - pub model_providers: HashMap, - - /// Maximum number of bytes to include from an AGENTS.md project doc file. - pub project_doc_max_bytes: Option, - - /// Ordered list of fallback filenames to look for when AGENTS.md is missing. - pub project_doc_fallback_filenames: Option>, - - /// Token budget applied when storing tool/function outputs in the context manager. - pub tool_output_token_limit: Option, - - /// Maximum poll window for background terminal output (`write_stdin`), in milliseconds. - /// Default: `300000` (5 minutes). 
- pub background_terminal_max_timeout: Option, - - /// Optional absolute path to the Node runtime used by `js_repl`. - pub js_repl_node_path: Option, - - /// Ordered list of directories to search for Node modules in `js_repl`. - pub js_repl_node_module_dirs: Option>, - - /// Optional absolute path to patched zsh used by zsh-exec-bridge-backed shell execution. - pub zsh_path: Option, - - /// Profile to use from the `profiles` map. - pub profile: Option, - - /// Named profiles to facilitate switching between different configurations. - #[serde(default)] - pub profiles: HashMap, - - /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. - #[serde(default)] - pub history: Option, - - /// Directory where Codex stores the SQLite state DB. - /// Defaults to `$CODEX_SQLITE_HOME` when set. Otherwise uses `$CODEX_HOME`. - pub sqlite_home: Option, - - /// Directory where Codex writes log files, for example `codex-tui.log`. - /// Defaults to `$CODEX_HOME/log`. - pub log_dir: Option, - - /// Optional URI-based file opener. If set, citations to files in the model - /// output will be hyperlinked using the specified URI scheme. - pub file_opener: Option, - - /// Collection of settings that are specific to the TUI. - pub tui: Option, - - /// When set to `true`, `AgentReasoning` events will be hidden from the - /// UI/output. Defaults to `false`. - pub hide_agent_reasoning: Option, - - /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. - /// Defaults to `false`. - pub show_raw_agent_reasoning: Option, - - pub model_reasoning_effort: Option, - pub plan_mode_reasoning_effort: Option, - pub model_reasoning_summary: Option, - /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). - pub model_verbosity: Option, - - /// Override to force-enable reasoning summaries for the configured model. 
- pub model_supports_reasoning_summaries: Option, - - /// Optional path to a JSON model catalog (applied on startup only). - /// Per-thread `config` overrides are accepted but do not reapply this (no-ops). - pub model_catalog_json: Option, - - /// Optionally specify a personality for the model - pub personality: Option, - - /// Optional explicit service tier preference for new turns (`fast` or `flex`). - pub service_tier: Option, - - /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). - pub chatgpt_base_url: Option, - - /// Base URL override for the built-in `openai` model provider. - pub openai_base_url: Option, - - /// Machine-local realtime audio device preferences used by realtime voice. - #[serde(default)] - pub audio: Option, - - /// Experimental / do not use. Overrides only the realtime conversation - /// websocket transport base URL (the `Op::RealtimeConversation` - /// `/v1/realtime` - /// connection) without changing normal provider HTTP requests. - pub experimental_realtime_ws_base_url: Option, - /// Experimental / do not use. Selects the realtime websocket model/snapshot - /// used for the `Op::RealtimeConversation` connection. - pub experimental_realtime_ws_model: Option, - /// Experimental / do not use. Realtime websocket session selection. - /// `version` controls v1/v2 and `type` controls conversational/transcription. - #[serde(default)] - pub realtime: Option, - /// Experimental / do not use. Overrides only the realtime conversation - /// websocket transport instructions (the `Op::RealtimeConversation` - /// `/ws` session.update instructions) without changing normal prompts. - pub experimental_realtime_ws_backend_prompt: Option, - /// Experimental / do not use. Replaces the synthesized realtime startup - /// context appended to websocket session instructions. An empty string - /// disables startup context injection entirely. - pub experimental_realtime_ws_startup_context: Option, - /// Experimental / do not use. 
Replaces the built-in realtime start - /// instructions inserted into developer messages when realtime becomes - /// active. - pub experimental_realtime_start_instructions: Option, - pub projects: Option>, - - /// Controls the web search tool mode: disabled, cached, or live. - pub web_search: Option, - - /// Nested tools section for feature toggles - pub tools: Option, - - /// Additional discoverable tools that can be suggested for installation. - pub tool_suggest: Option, - - /// Agent-related settings (thread limits, etc.). - pub agents: Option, - - /// Memories subsystem settings. - pub memories: Option, - - /// User-level skill config entries keyed by SKILL.md path. - pub skills: Option, - - /// User-level plugin config entries keyed by plugin name. - #[serde(default)] - pub plugins: HashMap, - - /// Centralized feature flags (new). Prefer this over individual toggles. - #[serde(default)] - // Injects known feature keys into the schema and forbids unknown keys. - #[schemars(schema_with = "crate::config::schema::features_schema")] - pub features: Option, - - /// Suppress warnings about unstable (under development) features. - pub suppress_unstable_features_warning: Option, - - /// Settings for ghost snapshots (used for undo). - #[serde(default)] - pub ghost_snapshot: Option, - - /// Markers used to detect the project root when searching parent - /// directories for `.codex` folders. Defaults to [".git"] when unset. - #[serde(default)] - pub project_root_markers: Option>, - - /// When `true`, checks for Codex updates on startup and surfaces update prompts. - /// Set to `false` only if your Codex updates are centrally managed. - /// Defaults to `true`. - pub check_for_update_on_startup: Option, - - /// When true, disables burst-paste detection for typed input entirely. - /// All characters are inserted as they are received, and no buffering - /// or placeholder replacement will occur for fast keypress bursts. 
- pub disable_paste_burst: Option, - - /// When `false`, disables analytics across Codex product surfaces in this machine. - /// Defaults to `true`. - pub analytics: Option, - - /// When `false`, disables feedback collection across Codex product surfaces. - /// Defaults to `true`. - pub feedback: Option, - - /// Settings for app-specific controls. - #[serde(default)] - pub apps: Option, - - /// OTEL configuration. - pub otel: Option, - - /// Windows-specific configuration. - #[serde(default)] - pub windows: Option, - - /// Tracks whether the Windows onboarding screen has been acknowledged. - pub windows_wsl_setup_acknowledged: Option, - - /// Collection of in-product notices (different from notifications) - /// See [`codex_config::types::Notice`] for more details - pub notice: Option, - - /// Legacy, now use features - /// Deprecated: ignored. Use `model_instructions_file`. - #[schemars(skip)] - pub experimental_instructions_file: Option, - pub experimental_compact_prompt_file: Option, - pub experimental_use_unified_exec_tool: Option, - pub experimental_use_freeform_apply_patch: Option, - /// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama". 
- pub oss_provider: Option, -} - -impl From for UserSavedConfig { - fn from(config_toml: ConfigToml) -> Self { - let profiles = config_toml - .profiles - .into_iter() - .map(|(k, v)| (k, v.into())) - .collect(); - - Self { - approval_policy: config_toml.approval_policy, - sandbox_mode: config_toml.sandbox_mode, - sandbox_settings: config_toml.sandbox_workspace_write.map(From::from), - forced_chatgpt_workspace_id: config_toml.forced_chatgpt_workspace_id, - forced_login_method: config_toml.forced_login_method, - model: config_toml.model, - model_reasoning_effort: config_toml.model_reasoning_effort, - model_reasoning_summary: config_toml.model_reasoning_summary, - model_verbosity: config_toml.model_verbosity, - tools: config_toml.tools.map(From::from), - profile: config_toml.profile, - profiles, - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct ProjectConfig { - pub trust_level: Option, -} - -impl ProjectConfig { - pub fn is_trusted(&self) -> bool { - matches!(self.trust_level, Some(TrustLevel::Trusted)) - } - - pub fn is_untrusted(&self) -> bool { - matches!(self.trust_level, Some(TrustLevel::Untrusted)) - } -} - #[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct RealtimeAudioConfig { - pub microphone: Option, - pub speaker: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default, PartialEq, Eq, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum RealtimeWsMode { - #[default] - Conversational, - Transcription, -} - -pub use codex_protocol::protocol::RealtimeConversationVersion as RealtimeWsVersion; - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct RealtimeConfig { - pub version: RealtimeWsVersion, - #[serde(rename = "type")] - pub session_type: RealtimeWsMode, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] 
-#[schemars(deny_unknown_fields)] -pub struct RealtimeToml { - pub version: Option, - #[serde(rename = "type")] - pub session_type: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct RealtimeAudioToml { - pub microphone: Option, - pub speaker: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct ToolsToml { - #[serde( - default, - deserialize_with = "deserialize_optional_web_search_tool_config" - )] - pub web_search: Option, - - /// Enable the `view_image` tool that lets the agent attach local images. - #[serde(default)] - pub view_image: Option, -} - -#[derive(Deserialize)] -#[serde(untagged)] -enum WebSearchToolConfigInput { - Enabled(bool), - Config(WebSearchToolConfig), -} - -fn deserialize_optional_web_search_tool_config<'de, D>( - deserializer: D, -) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - let value = Option::::deserialize(deserializer)?; - - Ok(match value { - None => None, - Some(WebSearchToolConfigInput::Enabled(enabled)) => { - let _ = enabled; - None - } - Some(WebSearchToolConfigInput::Config(config)) => Some(config), - }) +pub struct AgentRoleConfig { + /// Human-facing role documentation used in spawn tool guidance. + /// Required for loaded user-defined roles after deprecated/new metadata precedence resolves. + pub description: Option, + /// Path to a role-specific config layer. + pub config_file: Option, + /// Candidate nicknames for agents spawned with this role. 
+ pub nickname_candidates: Option>, } fn resolve_tool_suggest_config(config_toml: &ConfigToml) -> ToolSuggestConfig { @@ -1603,218 +1136,6 @@ fn resolve_tool_suggest_config(config_toml: &ConfigToml) -> ToolSuggestConfig { ToolSuggestConfig { discoverables } } -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct AgentsToml { - /// Maximum number of agent threads that can be open concurrently. - /// When unset, no limit is enforced. - #[schemars(range(min = 1))] - pub max_threads: Option, - /// Maximum nesting depth allowed for spawned agent threads. - /// Root sessions start at depth 0. - #[schemars(range(min = 1))] - pub max_depth: Option, - /// Default maximum runtime in seconds for agent job workers. - #[schemars(range(min = 1))] - pub job_max_runtime_seconds: Option, - - /// User-defined role declarations keyed by role name. - /// - /// Example: - /// ```toml - /// [agents.researcher] - /// description = "Research-focused role." - /// config_file = "./agents/researcher.toml" - /// nickname_candidates = ["Herodotus", "Ibn Battuta"] - /// ``` - #[serde(default, flatten)] - pub roles: BTreeMap, -} - -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct AgentRoleConfig { - /// Human-facing role documentation used in spawn tool guidance. - /// Required for loaded user-defined roles after deprecated/new metadata precedence resolves. - pub description: Option, - /// Path to a role-specific config layer. - pub config_file: Option, - /// Candidate nicknames for agents spawned with this role. - pub nickname_candidates: Option>, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct AgentRoleToml { - /// Human-facing role documentation used in spawn tool guidance. - /// Required unless supplied by the referenced agent role file. - pub description: Option, - - /// Path to a role-specific config layer. 
- /// Relative paths are resolved relative to the `config.toml` that defines them. - pub config_file: Option, - - /// Candidate nicknames for agents spawned with this role. - pub nickname_candidates: Option>, -} - -impl From for Tools { - fn from(tools_toml: ToolsToml) -> Self { - Self { - web_search: tools_toml.web_search.is_some().then_some(true), - view_image: tools_toml.view_image, - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct GhostSnapshotToml { - /// Exclude untracked files larger than this many bytes from ghost snapshots. - #[serde(alias = "ignore_untracked_files_over_bytes")] - pub ignore_large_untracked_files: Option, - /// Ignore untracked directories that contain this many files or more. - /// (Still emits a warning unless warnings are disabled.) - #[serde(alias = "large_untracked_dir_warning_threshold")] - pub ignore_large_untracked_dirs: Option, - /// Disable all ghost snapshot warning events. - pub disable_warnings: Option, -} - -impl ConfigToml { - /// Derive the effective sandbox policy from the configuration. - fn derive_sandbox_policy( - &self, - sandbox_mode_override: Option, - profile_sandbox_mode: Option, - windows_sandbox_level: WindowsSandboxLevel, - resolved_cwd: &Path, - sandbox_policy_constraint: Option<&Constrained>, - ) -> SandboxPolicy { - let sandbox_mode_was_explicit = sandbox_mode_override.is_some() - || profile_sandbox_mode.is_some() - || self.sandbox_mode.is_some(); - let resolved_sandbox_mode = sandbox_mode_override - .or(profile_sandbox_mode) - .or(self.sandbox_mode) - .or_else(|| { - // If no sandbox_mode is set but this directory has a trust decision, - // default to workspace-write except on unsandboxed Windows where we - // default to read-only. 
- self.get_active_project(resolved_cwd).and_then(|p| { - if p.is_trusted() || p.is_untrusted() { - if cfg!(target_os = "windows") - && windows_sandbox_level - == codex_protocol::config_types::WindowsSandboxLevel::Disabled - { - Some(SandboxMode::ReadOnly) - } else { - Some(SandboxMode::WorkspaceWrite) - } - } else { - None - } - }) - }) - .unwrap_or_default(); - let mut sandbox_policy = match resolved_sandbox_mode { - SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(), - SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() { - Some(SandboxWorkspaceWrite { - writable_roots, - network_access, - exclude_tmpdir_env_var, - exclude_slash_tmp, - }) => SandboxPolicy::WorkspaceWrite { - writable_roots: writable_roots.clone(), - read_only_access: ReadOnlyAccess::FullAccess, - network_access: *network_access, - exclude_tmpdir_env_var: *exclude_tmpdir_env_var, - exclude_slash_tmp: *exclude_slash_tmp, - }, - None => SandboxPolicy::new_workspace_write_policy(), - }, - SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess, - }; - let downgrade_workspace_write_if_unsupported = |policy: &mut SandboxPolicy| { - if cfg!(target_os = "windows") - // If the experimental Windows sandbox is enabled, do not force a downgrade. - && windows_sandbox_level - == codex_protocol::config_types::WindowsSandboxLevel::Disabled - && matches!(&*policy, SandboxPolicy::WorkspaceWrite { .. 
}) - { - *policy = SandboxPolicy::new_read_only_policy(); - } - }; - if matches!(resolved_sandbox_mode, SandboxMode::WorkspaceWrite) { - downgrade_workspace_write_if_unsupported(&mut sandbox_policy); - } - if !sandbox_mode_was_explicit - && let Some(constraint) = sandbox_policy_constraint - && let Err(err) = constraint.can_set(&sandbox_policy) - { - tracing::warn!( - error = %err, - "default sandbox policy is disallowed by requirements; falling back to required default" - ); - sandbox_policy = constraint.get().clone(); - downgrade_workspace_write_if_unsupported(&mut sandbox_policy); - } - sandbox_policy - } - - /// Resolves the cwd to an existing project, or returns None if ConfigToml - /// does not contain a project corresponding to cwd or a git repo for cwd - pub fn get_active_project(&self, resolved_cwd: &Path) -> Option { - let projects = self.projects.clone().unwrap_or_default(); - - let resolved_cwd_key = project_trust_key(resolved_cwd); - let resolved_cwd_raw_key = resolved_cwd.to_string_lossy().to_string(); - if let Some(project_config) = projects - .get(&resolved_cwd_key) - .or_else(|| projects.get(&resolved_cwd_raw_key)) - { - return Some(project_config.clone()); - } - - // If cwd lives inside a git repo/worktree, check whether the root git project - // (the primary repository working directory) is trusted. This lets - // worktrees inherit trust from the main project. 
- if let Some(repo_root) = resolve_root_git_project_for_trust(resolved_cwd) { - let repo_root_key = project_trust_key(repo_root.as_path()); - let repo_root_raw_key = repo_root.to_string_lossy().to_string(); - if let Some(project_config_for_root) = projects - .get(&repo_root_key) - .or_else(|| projects.get(&repo_root_raw_key)) - { - return Some(project_config_for_root.clone()); - } - } - - None - } - - pub fn get_config_profile( - &self, - override_profile: Option, - ) -> Result { - let profile = override_profile.or_else(|| self.profile.clone()); - - match profile { - Some(key) => { - if let Some(profile) = self.profiles.get(key.as_str()) { - return Ok(profile.clone()); - } - - Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - format!("config profile `{key}` not found"), - )) - } - None => Ok(ConfigProfile::default()), - } - } -} - #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum PermissionConfigSyntax { Legacy, @@ -1883,8 +1204,8 @@ pub struct ConfigOverrides { pub js_repl_node_path: Option, pub js_repl_node_module_dirs: Option>, pub zsh_path: Option, - pub base_instructions: Option, - pub developer_instructions: Option, + pub base_instructions: Option>, + pub developer_instructions: Option>, pub personality: Option, pub compact_prompt: Option, pub include_apply_patch_tool: Option, @@ -1895,49 +1216,6 @@ pub struct ConfigOverrides { pub additional_writable_roots: Vec, } -fn validate_reserved_model_provider_ids( - model_providers: &HashMap, -) -> Result<(), String> { - let mut conflicts = model_providers - .keys() - .filter(|key| RESERVED_MODEL_PROVIDER_IDS.contains(&key.as_str())) - .map(|key| format!("`{key}`")) - .collect::>(); - conflicts.sort_unstable(); - if conflicts.is_empty() { - Ok(()) - } else { - Err(format!( - "model_providers contains reserved built-in provider IDs: {}. \ -Built-in providers cannot be overridden. 
Rename your custom provider (for example, `openai-custom`).", - conflicts.join(", ") - )) - } -} - -fn validate_model_providers( - model_providers: &HashMap, -) -> Result<(), String> { - validate_reserved_model_provider_ids(model_providers)?; - for (key, provider) in model_providers { - provider - .validate() - .map_err(|message| format!("model_providers.{key}: {message}"))?; - } - Ok(()) -} - -fn deserialize_model_providers<'de, D>( - deserializer: D, -) -> Result, D::Error> -where - D: serde::Deserializer<'de>, -{ - let model_providers = HashMap::::deserialize(deserializer)?; - validate_model_providers(&model_providers).map_err(serde::de::Error::custom)?; - Ok(model_providers) -} - /// Resolves the OSS provider from CLI override, profile config, or global config. /// Returns `None` if no provider is configured at any level. pub fn resolve_oss_provider( @@ -2171,7 +1449,7 @@ impl Config { let mut additional_writable_roots: Vec = additional_writable_roots .into_iter() .map(|path| AbsolutePathBuf::resolve_path_against_base(path, resolved_cwd.as_path())) - .collect::, _>>()?; + .collect(); let active_project = cfg .get_active_project(resolved_cwd.as_path()) .unwrap_or(ProjectConfig { trust_level: None }); @@ -2486,8 +1764,10 @@ impl Config { .or(cfg.model_instructions_file.as_ref()); let file_base_instructions = Self::try_read_non_empty_file(model_instructions_path, "model instructions file")?; - let base_instructions = base_instructions.or(file_base_instructions); - let developer_instructions = developer_instructions.or(cfg.developer_instructions); + let base_instructions = base_instructions.or_else(|| file_base_instructions.map(Some)); + let developer_instructions_override = developer_instructions.clone(); + let developer_instructions = + developer_instructions.unwrap_or_else(|| cfg.developer_instructions.clone()); let include_permissions_instructions = config_profile .include_permissions_instructions .or(cfg.include_permissions_instructions) @@ -2670,6 +1950,7 @@ 
impl Config { base_instructions, personality, developer_instructions, + developer_instructions_override, compact_prompt, commit_attribution, include_permissions_instructions, diff --git a/codex-rs/core/src/config/network_proxy_spec.rs b/codex-rs/core/src/config/network_proxy_spec.rs index 93b59cf5f5..0a37f46cdf 100644 --- a/codex-rs/core/src/config/network_proxy_spec.rs +++ b/codex-rs/core/src/config/network_proxy_spec.rs @@ -20,6 +20,8 @@ use codex_protocol::protocol::SandboxPolicy; use std::collections::HashSet; use std::sync::Arc; +const GLOBAL_ALLOWLIST_PATTERN: &str = "*"; + #[derive(Debug, Clone, PartialEq, Eq)] pub struct NetworkProxySpec { config: NetworkProxyConfig, @@ -195,6 +197,8 @@ impl NetworkProxySpec { let allowlist_expansion_enabled = Self::allowlist_expansion_enabled(sandbox_policy, hard_deny_allowlist_misses); let denylist_expansion_enabled = Self::denylist_expansion_enabled(sandbox_policy); + let danger_full_access_denylist_only = + Self::danger_full_access_denylist_only_enabled(requirements, sandbox_policy); if let Some(enabled) = requirements.enabled { config.network.enabled = enabled; @@ -225,37 +229,43 @@ impl NetworkProxySpec { constraints.dangerously_allow_all_unix_sockets = Some(dangerously_allow_all_unix_sockets); } - let managed_allowed_domains = if hard_deny_allowlist_misses { - Some( + if danger_full_access_denylist_only { + config + .network + .set_allowed_domains(vec![GLOBAL_ALLOWLIST_PATTERN.to_string()]); + } else { + let managed_allowed_domains = if hard_deny_allowlist_misses { + Some( + requirements + .domains + .as_ref() + .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) + .unwrap_or_default(), + ) + } else { requirements .domains .as_ref() .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) - .unwrap_or_default(), - ) - } else { - requirements - .domains - .as_ref() - .and_then(codex_config::NetworkDomainPermissionsToml::allowed_domains) - }; - if let Some(managed_allowed_domains) = 
managed_allowed_domains { - // Managed requirements seed the baseline allowlist. User additions - // can extend that baseline unless managed-only mode pins the - // effective allowlist to the managed set. - let effective_allowed_domains = if allowlist_expansion_enabled { - Self::merge_domain_lists( - managed_allowed_domains.clone(), - config.network.allowed_domains().as_deref().unwrap_or(&[]), - ) - } else { - managed_allowed_domains.clone() }; - config - .network - .set_allowed_domains(effective_allowed_domains); - constraints.allowed_domains = Some(managed_allowed_domains); - constraints.allowlist_expansion_enabled = Some(allowlist_expansion_enabled); + if let Some(managed_allowed_domains) = managed_allowed_domains { + // Managed requirements seed the baseline allowlist. User additions + // can extend that baseline unless managed-only mode pins the + // effective allowlist to the managed set. + let effective_allowed_domains = if allowlist_expansion_enabled { + Self::merge_domain_lists( + managed_allowed_domains.clone(), + config.network.allowed_domains().as_deref().unwrap_or(&[]), + ) + } else { + managed_allowed_domains.clone() + }; + config + .network + .set_allowed_domains(effective_allowed_domains); + constraints.allowed_domains = Some(managed_allowed_domains); + constraints.allowlist_expansion_enabled = Some(allowlist_expansion_enabled); + } } let managed_denied_domains = requirements .domains @@ -274,7 +284,7 @@ impl NetworkProxySpec { constraints.denied_domains = Some(managed_denied_domains); constraints.denylist_expansion_enabled = Some(denylist_expansion_enabled); } - if requirements.unix_sockets.is_some() { + if requirements.unix_sockets.is_some() && !danger_full_access_denylist_only { let allow_unix_sockets = requirements .unix_sockets .as_ref() @@ -289,6 +299,14 @@ impl NetworkProxySpec { config.network.allow_local_binding = allow_local_binding; constraints.allow_local_binding = Some(allow_local_binding); } + if danger_full_access_denylist_only { + 
config.network.allow_upstream_proxy = true; + constraints.allow_upstream_proxy = Some(true); + config.network.dangerously_allow_all_unix_sockets = true; + constraints.dangerously_allow_all_unix_sockets = Some(true); + config.network.allow_local_binding = true; + constraints.allow_local_binding = Some(true); + } (config, constraints) } @@ -307,6 +325,16 @@ impl NetworkProxySpec { requirements.managed_allowed_domains_only.unwrap_or(false) } + fn danger_full_access_denylist_only_enabled( + requirements: &NetworkConstraints, + sandbox_policy: &SandboxPolicy, + ) -> bool { + matches!(sandbox_policy, SandboxPolicy::DangerFullAccess) + && requirements + .danger_full_access_denylist_only + .unwrap_or(false) + } + fn denylist_expansion_enabled(sandbox_policy: &SandboxPolicy) -> bool { matches!( sandbox_policy, diff --git a/codex-rs/core/src/config/network_proxy_spec_tests.rs b/codex-rs/core/src/config/network_proxy_spec_tests.rs index 77007885a4..742512d600 100644 --- a/codex-rs/core/src/config/network_proxy_spec_tests.rs +++ b/codex-rs/core/src/config/network_proxy_spec_tests.rs @@ -1,8 +1,11 @@ use super::*; use crate::config_loader::NetworkDomainPermissionToml; use crate::config_loader::NetworkDomainPermissionsToml; +use crate::config_loader::NetworkUnixSocketPermissionToml; +use crate::config_loader::NetworkUnixSocketPermissionsToml; use codex_network_proxy::NetworkDomainPermission; use pretty_assertions::assert_eq; +use std::collections::BTreeMap; fn domain_permissions( entries: impl IntoIterator, @@ -178,6 +181,147 @@ fn danger_full_access_keeps_managed_allowlist_and_denylist_fixed() { assert_eq!(spec.constraints.denylist_expansion_enabled, Some(false)); } +#[test] +fn danger_full_access_denylist_only_allows_all_domains_and_enforces_managed_denies() { + let mut config = NetworkProxyConfig::default(); + config + .network + .set_allowed_domains(vec!["evil.com".to_string()]); + config + .network + .set_denied_domains(vec!["more-blocked.example.com".to_string()]); + let 
requirements = NetworkConstraints { + allow_upstream_proxy: Some(false), + dangerously_allow_all_unix_sockets: Some(false), + domains: Some(domain_permissions([ + ("*.example.com", NetworkDomainPermissionToml::Allow), + ("blocked.example.com", NetworkDomainPermissionToml::Deny), + ])), + danger_full_access_denylist_only: Some(true), + unix_sockets: Some(NetworkUnixSocketPermissionsToml { + entries: BTreeMap::from([( + "/tmp/managed.sock".to_string(), + NetworkUnixSocketPermissionToml::Allow, + )]), + }), + allow_local_binding: Some(false), + ..Default::default() + }; + + let spec = NetworkProxySpec::from_config_and_constraints( + config, + Some(requirements), + &SandboxPolicy::DangerFullAccess, + ) + .expect("denylist-only yolo mode should allow all domains except managed denies"); + + assert_eq!( + spec.config.network.allowed_domains(), + Some(vec!["*".to_string()]) + ); + assert_eq!( + spec.config.network.denied_domains(), + Some(vec!["blocked.example.com".to_string()]) + ); + assert!(spec.config.network.allow_upstream_proxy); + assert!(spec.config.network.dangerously_allow_all_unix_sockets); + assert!(spec.config.network.allow_local_binding); + assert_eq!(spec.constraints.allow_upstream_proxy, Some(true)); + assert_eq!( + spec.constraints.dangerously_allow_all_unix_sockets, + Some(true) + ); + assert_eq!(spec.constraints.allow_unix_sockets, None); + assert_eq!(spec.constraints.allow_local_binding, Some(true)); + assert_eq!(spec.constraints.allowed_domains, None); + assert_eq!(spec.constraints.allowlist_expansion_enabled, None); + assert_eq!( + spec.constraints.denied_domains, + Some(vec!["blocked.example.com".to_string()]) + ); + assert_eq!(spec.constraints.denylist_expansion_enabled, Some(false)); +} + +#[test] +fn danger_full_access_denylist_only_does_not_change_workspace_write_behavior() { + let mut config = NetworkProxyConfig::default(); + config + .network + .set_allowed_domains(vec!["api.example.com".to_string()]); + config + .network + 
.set_denied_domains(vec!["blocked.example.com".to_string()]); + let requirements = NetworkConstraints { + allow_upstream_proxy: Some(false), + dangerously_allow_all_unix_sockets: Some(false), + domains: Some(domain_permissions([ + ("*.example.com", NetworkDomainPermissionToml::Allow), + ( + "managed-blocked.example.com", + NetworkDomainPermissionToml::Deny, + ), + ])), + danger_full_access_denylist_only: Some(true), + unix_sockets: Some(NetworkUnixSocketPermissionsToml { + entries: BTreeMap::from([( + "/tmp/managed.sock".to_string(), + NetworkUnixSocketPermissionToml::Allow, + )]), + }), + allow_local_binding: Some(false), + ..Default::default() + }; + + let spec = NetworkProxySpec::from_config_and_constraints( + config, + Some(requirements), + &SandboxPolicy::new_workspace_write_policy(), + ) + .expect("denylist-only yolo flag should not affect workspace-write mode"); + + assert_eq!( + spec.config.network.allowed_domains(), + Some(vec![ + "*.example.com".to_string(), + "api.example.com".to_string() + ]) + ); + assert_eq!( + spec.config.network.denied_domains(), + Some(vec![ + "managed-blocked.example.com".to_string(), + "blocked.example.com".to_string() + ]) + ); + assert!(!spec.config.network.allow_upstream_proxy); + assert!(!spec.config.network.dangerously_allow_all_unix_sockets); + assert_eq!( + spec.config.network.allow_unix_sockets(), + vec!["/tmp/managed.sock".to_string()] + ); + assert!(!spec.config.network.allow_local_binding); + assert_eq!(spec.constraints.allow_upstream_proxy, Some(false)); + assert_eq!( + spec.constraints.dangerously_allow_all_unix_sockets, + Some(false) + ); + assert_eq!( + spec.constraints.allow_unix_sockets, + Some(vec!["/tmp/managed.sock".to_string()]) + ); + assert_eq!(spec.constraints.allow_local_binding, Some(false)); + assert_eq!( + spec.constraints.allowed_domains, + Some(vec!["*.example.com".to_string()]) + ); + assert_eq!(spec.constraints.allowlist_expansion_enabled, Some(true)); + assert_eq!( + 
spec.constraints.denied_domains, + Some(vec!["managed-blocked.example.com".to_string()]) + ); + assert_eq!(spec.constraints.denylist_expansion_enabled, Some(true)); +} + #[test] fn managed_allowed_domains_only_disables_default_mode_allowlist_expansion() { let mut config = NetworkProxyConfig::default(); diff --git a/codex-rs/core/src/config/permissions.rs b/codex-rs/core/src/config/permissions.rs index 73dad1c73c..826d0139bf 100644 --- a/codex-rs/core/src/config/permissions.rs +++ b/codex-rs/core/src/config/permissions.rs @@ -1,256 +1,22 @@ use std::borrow::Cow; -use std::collections::BTreeMap; use std::io; use std::path::Component; use std::path::Path; use std::path::PathBuf; -use codex_network_proxy::NetworkDomainPermission as ProxyNetworkDomainPermission; -use codex_network_proxy::NetworkMode; +use codex_config::permissions_toml::FilesystemPermissionToml; +use codex_config::permissions_toml::NetworkToml; +use codex_config::permissions_toml::PermissionProfileToml; +use codex_config::permissions_toml::PermissionsToml; use codex_network_proxy::NetworkProxyConfig; +#[cfg(test)] use codex_network_proxy::NetworkUnixSocketPermission as ProxyNetworkUnixSocketPermission; -use codex_network_proxy::normalize_host; -use codex_protocol::permissions::FileSystemAccessMode; use codex_protocol::permissions::FileSystemPath; use codex_protocol::permissions::FileSystemSandboxEntry; use codex_protocol::permissions::FileSystemSandboxPolicy; use codex_protocol::permissions::FileSystemSpecialPath; use codex_protocol::permissions::NetworkSandboxPolicy; use codex_utils_absolute_path::AbsolutePathBuf; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -pub struct PermissionsToml { - #[serde(flatten)] - pub entries: BTreeMap, -} - -impl PermissionsToml { - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, 
PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct PermissionProfileToml { - pub filesystem: Option, - pub network: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -pub struct FilesystemPermissionsToml { - #[serde(flatten)] - pub entries: BTreeMap, -} - -impl FilesystemPermissionsToml { - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema)] -#[serde(untagged)] -pub enum FilesystemPermissionToml { - Access(FileSystemAccessMode), - Scoped(BTreeMap), -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -pub struct NetworkDomainPermissionsToml { - #[serde(flatten)] - pub entries: BTreeMap, -} - -impl NetworkDomainPermissionsToml { - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - #[cfg(test)] - pub(crate) fn allowed_domains(&self) -> Option> { - let allowed_domains: Vec = self - .entries - .iter() - .filter(|(_, permission)| matches!(permission, NetworkDomainPermissionToml::Allow)) - .map(|(pattern, _)| pattern.clone()) - .collect(); - (!allowed_domains.is_empty()).then_some(allowed_domains) - } - - #[cfg(test)] - pub(crate) fn denied_domains(&self) -> Option> { - let denied_domains: Vec = self - .entries - .iter() - .filter(|(_, permission)| matches!(permission, NetworkDomainPermissionToml::Deny)) - .map(|(pattern, _)| pattern.clone()) - .collect(); - (!denied_domains.is_empty()).then_some(denied_domains) - } -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, JsonSchema, -)] -#[serde(rename_all = "lowercase")] -pub enum NetworkDomainPermissionToml { - Allow, - Deny, -} - -impl std::fmt::Display for NetworkDomainPermissionToml { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let permission = match self { - Self::Allow => "allow", - Self::Deny => "deny", - }; - f.write_str(permission) - 
} -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -pub struct NetworkUnixSocketPermissionsToml { - #[serde(flatten)] - pub entries: BTreeMap, -} - -impl NetworkUnixSocketPermissionsToml { - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - pub(crate) fn allow_unix_sockets(&self) -> Vec { - self.entries - .iter() - .filter(|(_, permission)| matches!(permission, NetworkUnixSocketPermissionToml::Allow)) - .map(|(path, _)| path.clone()) - .collect() - } -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, JsonSchema, -)] -#[serde(rename_all = "lowercase")] -pub enum NetworkUnixSocketPermissionToml { - Allow, - None, -} - -impl std::fmt::Display for NetworkUnixSocketPermissionToml { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let permission = match self { - Self::Allow => "allow", - Self::None => "none", - }; - f.write_str(permission) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)] -#[schemars(deny_unknown_fields)] -pub struct NetworkToml { - pub enabled: Option, - pub proxy_url: Option, - pub enable_socks5: Option, - pub socks_url: Option, - pub enable_socks5_udp: Option, - pub allow_upstream_proxy: Option, - pub dangerously_allow_non_loopback_proxy: Option, - pub dangerously_allow_all_unix_sockets: Option, - #[schemars(with = "Option")] - pub mode: Option, - pub domains: Option, - pub unix_sockets: Option, - pub allow_local_binding: Option, -} - -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] -#[serde(rename_all = "lowercase")] -enum NetworkModeSchema { - Limited, - Full, -} - -impl NetworkToml { - pub(crate) fn apply_to_network_proxy_config(&self, config: &mut NetworkProxyConfig) { - if let Some(enabled) = self.enabled { - config.network.enabled = enabled; - } - if let Some(proxy_url) = self.proxy_url.as_ref() { - config.network.proxy_url = proxy_url.clone(); - 
} - if let Some(enable_socks5) = self.enable_socks5 { - config.network.enable_socks5 = enable_socks5; - } - if let Some(socks_url) = self.socks_url.as_ref() { - config.network.socks_url = socks_url.clone(); - } - if let Some(enable_socks5_udp) = self.enable_socks5_udp { - config.network.enable_socks5_udp = enable_socks5_udp; - } - if let Some(allow_upstream_proxy) = self.allow_upstream_proxy { - config.network.allow_upstream_proxy = allow_upstream_proxy; - } - if let Some(dangerously_allow_non_loopback_proxy) = - self.dangerously_allow_non_loopback_proxy - { - config.network.dangerously_allow_non_loopback_proxy = - dangerously_allow_non_loopback_proxy; - } - if let Some(dangerously_allow_all_unix_sockets) = self.dangerously_allow_all_unix_sockets { - config.network.dangerously_allow_all_unix_sockets = dangerously_allow_all_unix_sockets; - } - if let Some(mode) = self.mode { - config.network.mode = mode; - } - if let Some(domains) = self.domains.as_ref() { - overlay_network_domain_permissions(config, domains); - } - if let Some(unix_sockets) = self.unix_sockets.as_ref() { - let mut proxy_unix_sockets = config.network.unix_sockets.take().unwrap_or_default(); - for (path, permission) in &unix_sockets.entries { - let permission = match permission { - NetworkUnixSocketPermissionToml::Allow => { - ProxyNetworkUnixSocketPermission::Allow - } - NetworkUnixSocketPermissionToml::None => ProxyNetworkUnixSocketPermission::None, - }; - proxy_unix_sockets.entries.insert(path.clone(), permission); - } - config.network.unix_sockets = - (!proxy_unix_sockets.entries.is_empty()).then_some(proxy_unix_sockets); - } - if let Some(allow_local_binding) = self.allow_local_binding { - config.network.allow_local_binding = allow_local_binding; - } - } - - pub(crate) fn to_network_proxy_config(&self) -> NetworkProxyConfig { - let mut config = NetworkProxyConfig::default(); - self.apply_to_network_proxy_config(&mut config); - config - } -} - -pub(crate) fn overlay_network_domain_permissions( - 
config: &mut NetworkProxyConfig, - domains: &NetworkDomainPermissionsToml, -) { - for (pattern, permission) in &domains.entries { - let permission = match permission { - NetworkDomainPermissionToml::Allow => ProxyNetworkDomainPermission::Allow, - NetworkDomainPermissionToml::Deny => ProxyNetworkDomainPermission::Deny, - }; - config - .network - .upsert_domain_permission(pattern.clone(), permission, normalize_host); - } -} pub(crate) fn network_proxy_config_from_profile_network( network: Option<&NetworkToml>, @@ -416,7 +182,7 @@ fn compile_scoped_filesystem_path( let subpath = parse_relative_subpath(subpath)?; let base = parse_absolute_path(path)?; - let path = AbsolutePathBuf::resolve_path_against_base(&subpath, base.as_path())?; + let path = AbsolutePathBuf::resolve_path_against_base(&subpath, base.as_path()); Ok(FileSystemPath::Path { path }) } diff --git a/codex-rs/core/src/config/permissions_tests.rs b/codex-rs/core/src/config/permissions_tests.rs index e3ea67d7be..e9191dbfa7 100644 --- a/codex-rs/core/src/config/permissions_tests.rs +++ b/codex-rs/core/src/config/permissions_tests.rs @@ -1,7 +1,15 @@ use super::*; use crate::config::Config; use crate::config::ConfigOverrides; -use crate::config::ConfigToml; +use codex_config::config_toml::ConfigToml; +use codex_config::permissions_toml::FilesystemPermissionsToml; +use codex_config::permissions_toml::NetworkDomainPermissionToml; +use codex_config::permissions_toml::NetworkDomainPermissionsToml; +use codex_config::permissions_toml::NetworkToml; +use codex_config::permissions_toml::NetworkUnixSocketPermissionToml; +use codex_config::permissions_toml::NetworkUnixSocketPermissionsToml; +use codex_config::permissions_toml::PermissionProfileToml; +use codex_config::permissions_toml::PermissionsToml; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use std::collections::BTreeMap; diff --git a/codex-rs/core/src/config/schema.rs b/codex-rs/core/src/config/schema.rs index 
bde38f7eb5..9507aff585 100644 --- a/codex-rs/core/src/config/schema.rs +++ b/codex-rs/core/src/config/schema.rs @@ -1,103 +1,6 @@ -use crate::config::ConfigToml; -use codex_config::types::RawMcpServerConfig; -use codex_features::FEATURES; -use codex_features::legacy_feature_keys; -use schemars::r#gen::SchemaGenerator; -use schemars::r#gen::SchemaSettings; -use schemars::schema::InstanceType; -use schemars::schema::ObjectValidation; -use schemars::schema::RootSchema; -use schemars::schema::Schema; -use schemars::schema::SchemaObject; -use serde_json::Map; -use serde_json::Value; -use std::path::Path; - -/// Schema for the `[features]` map with known + legacy keys only. -pub(crate) fn features_schema(schema_gen: &mut SchemaGenerator) -> Schema { - let mut object = SchemaObject { - instance_type: Some(InstanceType::Object.into()), - ..Default::default() - }; - - let mut validation = ObjectValidation::default(); - for feature in FEATURES { - if feature.id == codex_features::Feature::Artifact { - continue; - } - validation - .properties - .insert(feature.key.to_string(), schema_gen.subschema_for::()); - } - for legacy_key in legacy_feature_keys() { - validation - .properties - .insert(legacy_key.to_string(), schema_gen.subschema_for::()); - } - validation.additional_properties = Some(Box::new(Schema::Bool(false))); - object.object = Some(Box::new(validation)); - - Schema::Object(object) -} - -/// Schema for the `[mcp_servers]` map using the raw input shape. -pub(crate) fn mcp_servers_schema(schema_gen: &mut SchemaGenerator) -> Schema { - let mut object = SchemaObject { - instance_type: Some(InstanceType::Object.into()), - ..Default::default() - }; - - let validation = ObjectValidation { - additional_properties: Some(Box::new(schema_gen.subschema_for::())), - ..Default::default() - }; - object.object = Some(Box::new(validation)); - - Schema::Object(object) -} - -/// Build the config schema for `config.toml`. 
-pub fn config_schema() -> RootSchema { - SchemaSettings::draft07() - .with(|settings| { - settings.option_add_null_type = false; - }) - .into_generator() - .into_root_schema_for::() -} - -/// Canonicalize a JSON value by sorting its keys. -fn canonicalize(value: &Value) -> Value { - match value { - Value::Array(items) => Value::Array(items.iter().map(canonicalize).collect()), - Value::Object(map) => { - let mut entries: Vec<_> = map.iter().collect(); - entries.sort_by(|(left, _), (right, _)| left.cmp(right)); - let mut sorted = Map::with_capacity(map.len()); - for (key, child) in entries { - sorted.insert(key.clone(), canonicalize(child)); - } - Value::Object(sorted) - } - _ => value.clone(), - } -} - -/// Render the config schema as pretty-printed JSON. -pub fn config_schema_json() -> anyhow::Result> { - let schema = config_schema(); - let value = serde_json::to_value(schema)?; - let value = canonicalize(&value); - let json = serde_json::to_vec_pretty(&value)?; - Ok(json) -} - -/// Write the config schema fixture to disk. 
-pub fn write_config_schema(out_path: &Path) -> anyhow::Result<()> { - let json = config_schema_json()?; - std::fs::write(out_path, json)?; - Ok(()) -} +use codex_config::schema::canonicalize; +use codex_config::schema::config_schema_json; +use codex_config::schema::write_config_schema; #[cfg(test)] #[path = "schema_tests.rs"] diff --git a/codex-rs/core/src/config/service.rs b/codex-rs/core/src/config/service.rs index e878270b80..80d4b6bdd9 100644 --- a/codex-rs/core/src/config/service.rs +++ b/codex-rs/core/src/config/service.rs @@ -1,4 +1,3 @@ -use super::ConfigToml; use super::deserialize_config_toml_with_base; use crate::config::edit::ConfigEdit; use crate::config::edit::ConfigEditsBuilder; @@ -29,6 +28,7 @@ use codex_app_server_protocol::MergeStrategy; use codex_app_server_protocol::OverriddenMetadata; use codex_app_server_protocol::WriteStatus; use codex_config::CONFIG_TOML_FILE; +use codex_config::config_toml::ConfigToml; use codex_utils_absolute_path::AbsolutePathBuf; use serde_json::Value as JsonValue; use std::borrow::Cow; @@ -265,8 +265,7 @@ impl ConfigService { edits: Vec<(String, JsonValue, MergeStrategy)>, ) -> Result { let allowed_path = - AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, &self.codex_home) - .map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?; + AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, &self.codex_home); let provided_path = match file_path { Some(path) => AbsolutePathBuf::from_absolute_path(PathBuf::from(path)) .map_err(|err| ConfigServiceError::io("failed to resolve user config path", err))?, diff --git a/codex-rs/core/src/config_loader/mod.rs b/codex-rs/core/src/config_loader/mod.rs index 48555f0b92..f40482fd57 100644 --- a/codex-rs/core/src/config_loader/mod.rs +++ b/codex-rs/core/src/config_loader/mod.rs @@ -5,11 +5,12 @@ mod macos; #[cfg(test)] mod tests; -use crate::config::ConfigToml; use crate::config_loader::layer_io::LoadedConfigLayers; use 
codex_app_server_protocol::ConfigLayerSource; use codex_config::CONFIG_TOML_FILE; use codex_config::ConfigRequirementsWithSources; +use codex_config::config_toml::ConfigToml; +use codex_config::config_toml::ProjectConfig; use codex_git_utils::resolve_root_git_project_for_trust; use codex_protocol::config_types::ApprovalsReviewer; use codex_protocol::config_types::SandboxMode; @@ -186,7 +187,7 @@ pub async fn load_config_layers_state( // Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file // exists, but is malformed, then this error should be propagated to the // user. - let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?; + let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home); let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| { ConfigLayerEntry::new( ConfigLayerSource::User { @@ -544,7 +545,7 @@ struct ProjectTrustContext { #[derive(Deserialize)] struct ProjectTrustConfigToml { - projects: Option>, + projects: Option>, } struct ProjectTrustDecision { @@ -808,7 +809,7 @@ async fn load_project_layers( if dot_codex_abs == codex_home_abs || dot_codex_normalized == codex_home_normalized { continue; } - let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?; + let config_file = dot_codex_abs.join(CONFIG_TOML_FILE); match tokio::fs::read_to_string(&config_file).await { Ok(contents) => { let config: TomlValue = match toml::from_str(&contents) { @@ -944,7 +945,7 @@ foo = "xyzzy" expected_toml_value.insert( "model_instructions_file".to_string(), TomlValue::String( - AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)? 
+ AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir) .as_path() .to_string_lossy() .to_string(), diff --git a/codex-rs/core/src/config_loader/tests.rs b/codex-rs/core/src/config_loader/tests.rs index ea782922b5..d476b8e6fd 100644 --- a/codex-rs/core/src/config_loader/tests.rs +++ b/codex-rs/core/src/config_loader/tests.rs @@ -2,9 +2,7 @@ use super::LoaderOverrides; use super::load_config_layers_state; use crate::config::ConfigBuilder; use crate::config::ConfigOverrides; -use crate::config::ConfigToml; use crate::config::ConstraintError; -use crate::config::ProjectConfig; use crate::config_loader::CloudRequirementsLoadError; use crate::config_loader::CloudRequirementsLoader; use crate::config_loader::ConfigLayerEntry; @@ -16,6 +14,8 @@ use crate::config_loader::RequirementSource; use crate::config_loader::load_requirements_toml; use crate::config_loader::version_for_toml; use codex_config::CONFIG_TOML_FILE; +use codex_config::config_toml::ConfigToml; +use codex_config::config_toml::ProjectConfig; use codex_protocol::config_types::TrustLevel; use codex_protocol::config_types::WebSearchMode; use codex_protocol::protocol::AskForApproval; @@ -78,7 +78,7 @@ async fn cli_overrides_resolve_relative_paths_against_cwd() -> std::io::Result<( .build() .await?; - let expected = AbsolutePathBuf::resolve_path_against_base("run-logs", cwd_path)?; + let expected = AbsolutePathBuf::resolve_path_against_base("run-logs", cwd_path); assert_eq!(config.log_dir, expected.to_path_buf()); Ok(()) } @@ -250,7 +250,6 @@ async fn returns_empty_when_all_layers_missing() { &ConfigLayerEntry { name: super::ConfigLayerSource::User { file: AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, tmp.path()) - .expect("resolve user config.toml path") }, config: TomlValue::Table(toml::map::Map::new()), raw_toml: None, @@ -905,7 +904,7 @@ model_instructions_file = "child.txt" .await?; assert_eq!( - config.base_instructions.as_deref(), + 
config.base_instructions.as_ref().and_then(Option::as_deref), Some("child instructions") ); @@ -941,7 +940,7 @@ async fn cli_override_model_instructions_file_sets_base_instructions() -> std::i .await?; assert_eq!( - config.base_instructions.as_deref(), + config.base_instructions.as_ref().and_then(Option::as_deref), Some("cli override instructions") ); diff --git a/codex-rs/core/src/connectors.rs b/codex-rs/core/src/connectors.rs index 10e10e99fc..ad3447eaf3 100644 --- a/codex-rs/core/src/connectors.rs +++ b/codex-rs/core/src/connectors.rs @@ -40,14 +40,14 @@ use codex_login::CodexAuth; use codex_login::default_client::create_client; use codex_login::default_client::is_first_party_chat_originator; use codex_login::default_client::originator; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; -use codex_mcp::mcp::ToolPluginProvenance; -use codex_mcp::mcp::auth::compute_auth_statuses; -use codex_mcp::mcp::with_codex_apps_mcp; -use codex_mcp::mcp_connection_manager::McpConnectionManager; -use codex_mcp::mcp_connection_manager::SandboxState; -use codex_mcp::mcp_connection_manager::ToolInfo; -use codex_mcp::mcp_connection_manager::codex_apps_tools_cache_key; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::McpConnectionManager; +use codex_mcp::SandboxState; +use codex_mcp::ToolInfo; +use codex_mcp::ToolPluginProvenance; +use codex_mcp::codex_apps_tools_cache_key; +use codex_mcp::compute_auth_statuses; +use codex_mcp::with_codex_apps_mcp; pub use codex_connectors::CONNECTORS_CACHE_TTL; const CONNECTORS_READY_TIMEOUT_ON_EMPTY_TOOLS: Duration = Duration::from_secs(30); @@ -143,9 +143,13 @@ pub(crate) async fn list_tool_suggest_discoverable_tools_with_auth( pub async fn list_cached_accessible_connectors_from_mcp_tools( config: &Config, ) -> Option> { - let auth_manager = auth_manager_from_config(config); + let auth_manager = + AuthManager::shared_from_config(config, /*enable_codex_api_key_env*/ false); let auth = auth_manager.auth().await; - if 
!config.features.apps_enabled_for_auth(auth.as_ref()) { + if !config + .features + .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::is_chatgpt_auth)) + { return Some(Vec::new()); } let cache_key = accessible_connectors_cache_key(config, auth.as_ref()); @@ -182,9 +186,13 @@ pub async fn list_accessible_connectors_from_mcp_tools_with_options_and_status( config: &Config, force_refetch: bool, ) -> anyhow::Result { - let auth_manager = auth_manager_from_config(config); + let auth_manager = + AuthManager::shared_from_config(config, /*enable_codex_api_key_env*/ false); let auth = auth_manager.auth().await; - if !config.features.apps_enabled_for_auth(auth.as_ref()) { + if !config + .features + .apps_enabled_for_auth(auth.as_ref().is_some_and(CodexAuth::is_chatgpt_auth)) + { return Ok(AccessibleConnectorsStatus { connectors: Vec::new(), codex_apps_ready: true, @@ -417,7 +425,8 @@ async fn list_directory_connectors_for_tool_suggest_with_auth( let token_data = if let Some(auth) = auth { auth.get_token_data().ok() } else { - let auth_manager = auth_manager_from_config(config); + let auth_manager = + AuthManager::shared_from_config(config, /*enable_codex_api_key_env*/ false); auth_manager .auth() .await @@ -492,14 +501,6 @@ async fn chatgpt_get_request_with_token( } } -fn auth_manager_from_config(config: &Config) -> std::sync::Arc { - AuthManager::shared( - config.codex_home.clone(), - /*enable_codex_api_key_env*/ false, - config.cli_auth_credentials_store_mode, - ) -} - pub fn connector_display_label(connector: &AppInfo) -> String { format_connector_label(&connector.name, &connector.id) } diff --git a/codex-rs/core/src/connectors_tests.rs b/codex-rs/core/src/connectors_tests.rs index 893ab243ca..7a45a97a1f 100644 --- a/codex-rs/core/src/connectors_tests.rs +++ b/codex-rs/core/src/connectors_tests.rs @@ -12,8 +12,8 @@ use codex_config::types::AppToolConfig; use codex_config::types::AppToolsConfig; use codex_config::types::AppsDefaultConfig; use 
codex_features::Feature; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::ToolInfo; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use rmcp::model::JsonObject; diff --git a/codex-rs/core/src/exec.rs b/codex-rs/core/src/exec.rs index dcfc7c7514..eb510a5ed0 100644 --- a/codex-rs/core/src/exec.rs +++ b/codex-rs/core/src/exec.rs @@ -416,7 +416,7 @@ fn record_windows_sandbox_spawn_failure( } else { "legacy" }; - if let Some(metrics) = codex_otel::metrics::global() { + if let Some(metrics) = codex_otel::global() { let _ = metrics.counter( "codex.windows_sandbox.createprocessasuserw_failed", /*inc*/ 1, diff --git a/codex-rs/core/src/exec_policy.rs b/codex-rs/core/src/exec_policy.rs index dbfab81d32..98bb815e91 100644 --- a/codex-rs/core/src/exec_policy.rs +++ b/codex-rs/core/src/exec_policy.rs @@ -494,8 +494,7 @@ pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result, ) { - let Some(metrics) = codex_otel::metrics::global() else { + let Some(metrics) = codex_otel::global() else { return; }; let tags = migration_metric_tags(item_type, skills_count); diff --git a/codex-rs/core/src/guardian/mod.rs b/codex-rs/core/src/guardian/mod.rs index e2b72bd049..1a7add9d38 100644 --- a/codex-rs/core/src/guardian/mod.rs +++ b/codex-rs/core/src/guardian/mod.rs @@ -38,7 +38,7 @@ const GUARDIAN_MAX_MESSAGE_TRANSCRIPT_TOKENS: usize = 10_000; const GUARDIAN_MAX_TOOL_TRANSCRIPT_TOKENS: usize = 10_000; const GUARDIAN_MAX_MESSAGE_ENTRY_TOKENS: usize = 2_000; const GUARDIAN_MAX_TOOL_ENTRY_TOKENS: usize = 1_000; -const GUARDIAN_MAX_ACTION_STRING_TOKENS: usize = 1_000; +const GUARDIAN_MAX_ACTION_STRING_TOKENS: usize = 16_000; const GUARDIAN_APPROVAL_RISK_THRESHOLD: u8 = 80; const GUARDIAN_RECENT_ENTRY_LIMIT: usize = 40; const TRUNCATION_TAG: &str = "truncated"; diff --git a/codex-rs/core/src/guardian/prompt.rs 
b/codex-rs/core/src/guardian/prompt.rs index 5741a65fc0..7558c005b3 100644 --- a/codex-rs/core/src/guardian/prompt.rs +++ b/codex-rs/core/src/guardian/prompt.rs @@ -107,16 +107,22 @@ pub(crate) async fn build_guardian_prompt_items( Ok(items) } -/// Keeps all user turns plus a bounded amount of recent assistant/tool context. +/// Renders a compact guardian transcript from the retained history entries, +/// which are only user, assistant, and tool call entries. /// -/// The pruning strategy is intentionally simple and reviewable: -/// - always retain user messages because they carry authorization and intent -/// - walk recent non-user entries from newest to oldest -/// - keep them only while the message/tool budgets allow -/// - reserve a separate tool budget so tool evidence cannot crowd out the human -/// conversation +/// Selection is intentionally simple and predictable: +/// - each entry is truncated to its per-entry cap +/// - user and assistant entries share the message budget +/// - tool calls/results use a separate tool budget so tool evidence cannot +/// crowd out the human conversation +/// - if all user turns fit, keep them all +/// - otherwise keep the first and latest user turns as anchors, then fill the +/// remaining message budget with other user turns from newest to oldest +/// - after user turns are selected, keep recent non-user entries from newest to +/// oldest while the budgets and recent-entry limit allow /// -/// User messages are never dropped unless the entire transcript must be omitted. +/// Returns the rendered transcript plus an omission note when some entries were +/// skipped. 
pub(crate) fn render_guardian_transcript_entries( entries: &[GuardianTranscriptEntry], ) -> (Vec, Option) { @@ -143,20 +149,38 @@ pub(crate) fn render_guardian_transcript_entries( let mut included = vec![false; entries.len()]; let mut message_tokens = 0usize; let mut tool_tokens = 0usize; + let user_indices = entries + .iter() + .enumerate() + .filter_map(|(index, entry)| entry.kind.is_user().then_some(index)) + .collect::>(); - for (index, entry) in entries.iter().enumerate() { - if !entry.kind.is_user() { + if let Some(&first_user_index) = user_indices.first() { + included[first_user_index] = true; + message_tokens += rendered_entries[first_user_index].1; + } + + if let Some(&last_user_index) = user_indices.last() + && !included[last_user_index] + && message_tokens + rendered_entries[last_user_index].1 + <= GUARDIAN_MAX_MESSAGE_TRANSCRIPT_TOKENS + { + included[last_user_index] = true; + message_tokens += rendered_entries[last_user_index].1; + } + + for &index in user_indices.iter().rev() { + if included[index] { continue; } - message_tokens += rendered_entries[index].1; - if message_tokens > GUARDIAN_MAX_MESSAGE_TRANSCRIPT_TOKENS { - return ( - vec!["".to_string()], - Some("Conversation transcript omitted due to size.".to_string()), - ); + let token_count = rendered_entries[index].1; + if message_tokens + token_count > GUARDIAN_MAX_MESSAGE_TRANSCRIPT_TOKENS { + continue; } + included[index] = true; + message_tokens += token_count; } let mut retained_non_user_entries = 0usize; @@ -192,8 +216,7 @@ pub(crate) fn render_guardian_transcript_entries( .map(|(index, _)| rendered_entries[index].0.clone()) .collect::>(); let omitted_any = included.iter().any(|included_entry| !included_entry); - let omission_note = - omitted_any.then(|| "Earlier conversation entries were omitted.".to_string()); + let omission_note = omitted_any.then(|| "Some conversation entries were omitted.".to_string()); (transcript, omission_note) } diff --git 
a/codex-rs/core/src/guardian/review_session.rs b/codex-rs/core/src/guardian/review_session.rs index bc24974c82..368b812e56 100644 --- a/codex-rs/core/src/guardian/review_session.rs +++ b/codex-rs/core/src/guardian/review_session.rs @@ -137,7 +137,7 @@ impl GuardianReviewSessionReuseKey { model_reasoning_summary: spawn_config.model_reasoning_summary, permissions: spawn_config.permissions.clone(), developer_instructions: spawn_config.developer_instructions.clone(), - base_instructions: spawn_config.base_instructions.clone(), + base_instructions: spawn_config.base_instructions.clone().flatten(), user_instructions: spawn_config.user_instructions.clone(), compact_prompt: spawn_config.compact_prompt.clone(), cwd: spawn_config.cwd.to_path_buf(), diff --git a/codex-rs/core/src/guardian/tests.rs b/codex-rs/core/src/guardian/tests.rs index 17813ee7d0..2d6ddc7c31 100644 --- a/codex-rs/core/src/guardian/tests.rs +++ b/codex-rs/core/src/guardian/tests.rs @@ -3,7 +3,6 @@ use crate::codex::Session; use crate::codex::TurnContext; use crate::config::Config; use crate::config::ConfigOverrides; -use crate::config::ConfigToml; use crate::config::Constrained; use crate::config::ManagedFeatures; use crate::config::NetworkProxySpec; @@ -16,6 +15,7 @@ use crate::config_loader::NetworkDomainPermissionsToml; use crate::config_loader::RequirementSource; use crate::config_loader::Sourced; use crate::test_support; +use codex_config::config_toml::ConfigToml; use codex_network_proxy::NetworkProxyConfig; use codex_protocol::approvals::NetworkApprovalProtocol; use codex_protocol::config_types::ApprovalsReviewer; @@ -260,7 +260,7 @@ fn guardian_truncate_text_keeps_prefix_suffix_and_xml_marker() { #[test] fn format_guardian_action_pretty_truncates_large_string_fields() -> serde_json::Result<()> { - let patch = "line\n".repeat(10_000); + let patch = "line\n".repeat(100_000); let action = GuardianApprovalRequest::ApplyPatch { id: "patch-1".to_string(), cwd: PathBuf::from("/tmp"), @@ -271,6 +271,7 @@ 
fn format_guardian_action_pretty_truncates_large_string_fields() -> serde_json:: let rendered = format_guardian_action_pretty(&action)?; assert!(rendered.contains("\"tool\": \"apply_patch\"")); + assert!(rendered.contains(">(); + entries.extend([ + GuardianTranscriptEntry { + kind: GuardianTranscriptEntryKind::Tool("tool shell call".to_string()), + text: serde_json::json!({ + "command": ["curl", "-X", "POST", "https://example.com/upload"], + "cwd": "/repo", + }) + .to_string(), + }, + GuardianTranscriptEntry { + kind: GuardianTranscriptEntryKind::Tool("tool shell result".to_string()), + text: "sandbox blocked outbound network access".to_string(), + }, + ]); + + let (transcript, omission) = render_guardian_transcript_entries(&entries); + + assert!( + transcript + .iter() + .any(|entry| entry.starts_with("[1] user: ")) + ); + assert!(transcript.iter().any(|entry| { + entry.contains("tool shell call:") + && entry.contains("curl") + && entry.contains("https://example.com/upload") + })); + assert!( + transcript + .iter() + .any(|entry| entry + .contains("tool shell result: sandbox blocked outbound network access")) + ); + assert_eq!( + omission, + Some("Some conversation entries were omitted.".to_string()) + ); +} + #[test] fn parse_guardian_assessment_extracts_embedded_json() { let parsed = parse_guardian_assessment(Some( diff --git a/codex-rs/core/src/installation_id.rs b/codex-rs/core/src/installation_id.rs new file mode 100644 index 0000000000..940e17eb01 --- /dev/null +++ b/codex-rs/core/src/installation_id.rs @@ -0,0 +1,145 @@ +use std::fs::OpenOptions; +use std::io::Read; +use std::io::Result; +use std::io::Seek; +use std::io::SeekFrom; +use std::io::Write; +use std::path::Path; + +#[cfg(unix)] +use std::os::unix::fs::OpenOptionsExt; +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt; + +use tokio::fs; +use uuid::Uuid; + +pub(crate) const INSTALLATION_ID_FILENAME: &str = "installation_id"; + +pub(crate) async fn resolve_installation_id(codex_home: &Path) -> 
Result { + let path = codex_home.join(INSTALLATION_ID_FILENAME); + fs::create_dir_all(codex_home).await?; + tokio::task::spawn_blocking(move || { + let mut options = OpenOptions::new(); + options.read(true).write(true).create(true); + + #[cfg(unix)] + { + options.mode(0o644); + } + + let mut file = options.open(&path)?; + file.lock()?; + + #[cfg(unix)] + { + let metadata = file.metadata()?; + let current_mode = metadata.permissions().mode() & 0o777; + if current_mode != 0o644 { + let mut permissions = metadata.permissions(); + permissions.set_mode(0o644); + file.set_permissions(permissions)?; + } + } + + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + let trimmed = contents.trim(); + if !trimmed.is_empty() + && let Ok(existing) = Uuid::parse_str(trimmed) + { + return Ok(existing.to_string()); + } + + let installation_id = Uuid::new_v4().to_string(); + file.set_len(0)?; + file.seek(SeekFrom::Start(0))?; + file.write_all(installation_id.as_bytes())?; + file.flush()?; + file.sync_all()?; + + Ok(installation_id) + }) + .await? 
+} + +#[cfg(test)] +mod tests { + use super::INSTALLATION_ID_FILENAME; + use super::resolve_installation_id; + use pretty_assertions::assert_eq; + use tempfile::TempDir; + use uuid::Uuid; + + #[cfg(unix)] + use std::os::unix::fs::PermissionsExt; + + #[tokio::test] + async fn resolve_installation_id_generates_and_persists_uuid() { + let codex_home = TempDir::new().expect("create temp dir"); + let persisted_path = codex_home.path().join(INSTALLATION_ID_FILENAME); + + let installation_id = resolve_installation_id(codex_home.path()) + .await + .expect("resolve installation id"); + + assert_eq!( + std::fs::read_to_string(&persisted_path).expect("read persisted installation id"), + installation_id + ); + assert!(Uuid::parse_str(&installation_id).is_ok()); + + #[cfg(unix)] + { + let mode = std::fs::metadata(&persisted_path) + .expect("read installation id metadata") + .permissions() + .mode() + & 0o777; + assert_eq!(mode, 0o644); + } + } + + #[tokio::test] + async fn resolve_installation_id_reuses_existing_uuid() { + let codex_home = TempDir::new().expect("create temp dir"); + let existing = Uuid::new_v4().to_string().to_uppercase(); + std::fs::write( + codex_home.path().join(INSTALLATION_ID_FILENAME), + existing.clone(), + ) + .expect("write installation id"); + + let resolved = resolve_installation_id(codex_home.path()) + .await + .expect("resolve installation id"); + + assert_eq!( + resolved, + Uuid::parse_str(existing.as_str()) + .expect("parse existing installation id") + .to_string() + ); + } + + #[tokio::test] + async fn resolve_installation_id_rewrites_invalid_file_contents() { + let codex_home = TempDir::new().expect("create temp dir"); + std::fs::write( + codex_home.path().join(INSTALLATION_ID_FILENAME), + "not-a-uuid", + ) + .expect("write invalid installation id"); + + let resolved = resolve_installation_id(codex_home.path()) + .await + .expect("resolve installation id"); + + assert!(Uuid::parse_str(&resolved).is_ok()); + assert_eq!( + 
std::fs::read_to_string(codex_home.path().join(INSTALLATION_ID_FILENAME)) + .expect("read rewritten installation id"), + resolved + ); + } +} diff --git a/codex-rs/core/src/instructions/mod.rs b/codex-rs/core/src/instructions/mod.rs index 5af9a6e890..a1f77ba5e8 100644 --- a/codex-rs/core/src/instructions/mod.rs +++ b/codex-rs/core/src/instructions/mod.rs @@ -1,2 +1 @@ -pub use codex_instructions::USER_INSTRUCTIONS_PREFIX; pub(crate) use codex_instructions::UserInstructions; diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index d68c2b2992..83232de734 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -10,7 +10,7 @@ mod apps; mod arc_monitor; mod client; mod client_common; -pub mod codex; +pub(crate) mod codex; mod realtime_context; mod realtime_conversation; pub use codex::SteerInputError; @@ -38,28 +38,41 @@ mod flags; mod git_info_tests; mod guardian; mod hook_runtime; -pub mod instructions; -pub mod landlock; -pub mod mcp; +mod installation_id; +pub(crate) mod instructions; +pub(crate) mod landlock; +pub use landlock::spawn_command_under_linux_sandbox; +pub(crate) mod mcp; mod mcp_skill_dependencies; mod mcp_tool_approval_templates; mod network_policy_decision; -pub mod network_proxy_loader; +pub(crate) mod network_proxy_loader; +pub use mcp::McpManager; +pub use network_proxy_loader::MtimeConfigReloader; +pub use network_proxy_loader::build_network_proxy_state; +pub use network_proxy_loader::build_network_proxy_state_and_reloader; mod original_image_detail; -pub use codex_mcp::mcp_connection_manager; -pub use codex_mcp::mcp_connection_manager::MCP_SANDBOX_STATE_CAPABILITY; -pub use codex_mcp::mcp_connection_manager::MCP_SANDBOX_STATE_METHOD; -pub use codex_mcp::mcp_connection_manager::SandboxState; +pub use codex_mcp::MCP_SANDBOX_STATE_CAPABILITY; +pub use codex_mcp::MCP_SANDBOX_STATE_METHOD; +pub use codex_mcp::SandboxState; mod mcp_tool_call; mod memories; -pub mod mention_syntax; -pub mod message_history; -pub mod utils; 
+pub(crate) mod mention_syntax; +pub(crate) mod message_history; +pub(crate) mod utils; +pub use mention_syntax::PLUGIN_TEXT_MENTION_SIGIL; +pub use mention_syntax::TOOL_MENTION_SIGIL; +pub use message_history::HistoryEntry as MessageHistoryEntry; +pub use message_history::append_entry as append_message_history_entry; +pub use message_history::history_metadata as message_history_metadata; +pub use message_history::lookup as lookup_message_history_entry; pub use utils::path_utils; pub mod personality_migration; pub mod plugins; #[doc(hidden)] -pub mod prompt_debug; +pub(crate) mod prompt_debug; +#[doc(hidden)] +pub use prompt_debug::build_prompt_input; pub(crate) mod mentions { pub(crate) use crate::plugins::build_connector_slug_counts; pub(crate) use crate::plugins::build_skill_name_counts; @@ -102,54 +115,63 @@ mod event_mapping; pub mod review_format; pub mod review_prompts; mod thread_manager; -pub mod web_search; -pub mod windows_sandbox_read_grants; +pub(crate) mod web_search; +pub(crate) mod windows_sandbox_read_grants; pub use thread_manager::ForkSnapshot; pub use thread_manager::NewThread; pub use thread_manager::ThreadManager; +pub use web_search::web_search_action_detail; +pub use web_search::web_search_detail; +pub use windows_sandbox_read_grants::grant_read_root_non_elevated; #[deprecated(note = "use ThreadManager")] pub type ConversationManager = ThreadManager; #[deprecated(note = "use NewThread")] pub type NewConversation = NewThread; #[deprecated(note = "use CodexThread")] pub type CodexConversation = CodexThread; -pub mod project_doc; +pub(crate) mod project_doc; +pub use project_doc::DEFAULT_PROJECT_DOC_FILENAME; +pub use project_doc::LOCAL_PROJECT_DOC_FILENAME; +pub use project_doc::discover_project_doc_paths; +pub use project_doc::read_project_docs; mod rollout; pub(crate) mod safety; pub mod seatbelt; mod session_rollout_init_error; pub mod shell; -pub mod shell_snapshot; +pub(crate) mod shell_snapshot; pub mod spawn; -pub mod state_db_bridge; 
+pub(crate) mod state_db_bridge; +pub use state_db_bridge::StateDbHandle; +pub use state_db_bridge::get_state_db; mod thread_rollout_truncation; mod tools; -pub mod turn_diff_tracker; +pub(crate) mod turn_diff_tracker; mod turn_metadata; mod turn_timing; pub use rollout::ARCHIVED_SESSIONS_SUBDIR; +pub use rollout::Cursor; +pub use rollout::EventPersistenceMode; pub use rollout::INTERACTIVE_SESSION_SOURCES; pub use rollout::RolloutRecorder; pub use rollout::RolloutRecorderParams; pub use rollout::SESSIONS_SUBDIR; pub use rollout::SessionMeta; +pub use rollout::ThreadItem; +pub use rollout::ThreadSortKey; +pub use rollout::ThreadsPage; pub use rollout::append_thread_name; pub use rollout::find_archived_thread_path_by_id_str; #[deprecated(note = "use find_thread_path_by_id_str")] pub use rollout::find_conversation_path_by_id_str; pub use rollout::find_thread_name_by_id; +pub use rollout::find_thread_names_by_ids; pub use rollout::find_thread_path_by_id_str; pub use rollout::find_thread_path_by_name_str; -pub use rollout::list::Cursor; -pub use rollout::list::ThreadItem; -pub use rollout::list::ThreadSortKey; -pub use rollout::list::ThreadsPage; -pub use rollout::list::parse_cursor; -pub use rollout::list::read_head_for_summary; -pub use rollout::list::read_session_meta_line; -pub use rollout::policy::EventPersistenceMode; +pub use rollout::parse_cursor; +pub use rollout::read_head_for_summary; +pub use rollout::read_session_meta_line; pub use rollout::rollout_date_parts; -pub use rollout::session_index::find_thread_names_by_ids; mod function_tool; mod state; mod tasks; @@ -158,6 +180,7 @@ pub mod util; pub use client::ModelClient; pub use client::ModelClientSession; +pub use client::X_CODEX_INSTALLATION_ID_HEADER; pub use client::X_CODEX_TURN_METADATA_HEADER; pub use client_common::Prompt; pub use client_common::REVIEW_PROMPT; @@ -172,5 +195,7 @@ pub use exec_policy::load_exec_policy; pub use file_watcher::FileWatcherEvent; pub use 
turn_metadata::build_turn_metadata_header; pub mod compact; -pub mod memory_trace; +pub(crate) mod memory_trace; +pub use memory_trace::BuiltMemory; +pub use memory_trace::build_memories_from_trace_files; pub mod otel_init; diff --git a/codex-rs/core/src/mcp.rs b/codex-rs/core/src/mcp.rs index d82452c54d..83becdc07e 100644 --- a/codex-rs/core/src/mcp.rs +++ b/codex-rs/core/src/mcp.rs @@ -5,10 +5,10 @@ use crate::config::Config; use crate::plugins::PluginsManager; use codex_config::McpServerConfig; use codex_login::CodexAuth; -use codex_mcp::mcp::ToolPluginProvenance; -use codex_mcp::mcp::configured_mcp_servers; -use codex_mcp::mcp::effective_mcp_servers; -use codex_mcp::mcp::tool_plugin_provenance as collect_tool_plugin_provenance; +use codex_mcp::ToolPluginProvenance; +use codex_mcp::configured_mcp_servers; +use codex_mcp::effective_mcp_servers; +use codex_mcp::tool_plugin_provenance as collect_tool_plugin_provenance; #[derive(Clone)] pub struct McpManager { diff --git a/codex-rs/core/src/mcp_skill_dependencies.rs b/codex-rs/core/src/mcp_skill_dependencies.rs index dfd8a4c5c3..fca38919a0 100644 --- a/codex-rs/core/src/mcp_skill_dependencies.rs +++ b/codex-rs/core/src/mcp_skill_dependencies.rs @@ -21,10 +21,10 @@ use crate::SkillMetadata; use crate::codex::Session; use crate::codex::TurnContext; use crate::skills::model::SkillToolDependency; -use codex_mcp::mcp::auth::McpOAuthLoginSupport; -use codex_mcp::mcp::auth::oauth_login_support; -use codex_mcp::mcp::auth::resolve_oauth_scopes; -use codex_mcp::mcp::auth::should_retry_without_scopes; +use codex_mcp::McpOAuthLoginSupport; +use codex_mcp::oauth_login_support; +use codex_mcp::resolve_oauth_scopes; +use codex_mcp::should_retry_without_scopes; const SKILL_MCP_DEPENDENCY_PROMPT_ID: &str = "skill_mcp_dependency_install"; const MCP_DEPENDENCY_OPTION_INSTALL: &str = "Install"; diff --git a/codex-rs/core/src/mcp_tool_call.rs b/codex-rs/core/src/mcp_tool_call.rs index 344e46d0be..2a94d13f89 100644 --- 
a/codex-rs/core/src/mcp_tool_call.rs +++ b/codex-rs/core/src/mcp_tool_call.rs @@ -32,7 +32,7 @@ use codex_analytics::InvocationType; use codex_analytics::build_track_events_context; use codex_config::types::AppToolApproval; use codex_features::Feature; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_otel::sanitize_metric_tag_value; use codex_protocol::mcp::CallToolResult; use codex_protocol::openai_models::InputModality; diff --git a/codex-rs/core/src/mcp_tool_call_tests.rs b/codex-rs/core/src/mcp_tool_call_tests.rs index 5bb9194857..b313a43e71 100644 --- a/codex-rs/core/src/mcp_tool_call_tests.rs +++ b/codex-rs/core/src/mcp_tool_call_tests.rs @@ -2,9 +2,9 @@ use super::*; use crate::codex::make_session_and_context; use crate::codex::make_session_and_context_with_rx; use crate::config::ConfigBuilder; -use crate::config::ConfigToml; use crate::state::ActiveTurn; use codex_config::CONFIG_TOML_FILE; +use codex_config::config_toml::ConfigToml; use codex_config::types::AppConfig; use codex_config::types::AppToolConfig; use codex_config::types::AppToolsConfig; diff --git a/codex-rs/core/src/memories/phase1.rs b/codex-rs/core/src/memories/phase1.rs index 37cb0e9f48..0b8cdcdad5 100644 --- a/codex-rs/core/src/memories/phase1.rs +++ b/codex-rs/core/src/memories/phase1.rs @@ -336,9 +336,9 @@ mod job { }], tools: Vec::new(), parallel_tool_calls: false, - base_instructions: BaseInstructions { + base_instructions: Some(BaseInstructions { text: phase_one::PROMPT.to_string(), - }, + }), personality: None, output_schema: Some(output_schema()), }; diff --git a/codex-rs/core/src/network_proxy_loader.rs b/codex-rs/core/src/network_proxy_loader.rs index a6740b05fb..8d387ca968 100644 --- a/codex-rs/core/src/network_proxy_loader.rs +++ b/codex-rs/core/src/network_proxy_loader.rs @@ -1,7 +1,4 @@ -use crate::config::NetworkToml; -use crate::config::PermissionsToml; use crate::config::find_codex_home; -use 
crate::config::overlay_network_domain_permissions; use crate::config::resolve_permission_profile; use crate::config_loader::CloudRequirementsLoader; use crate::config_loader::ConfigLayerStack; @@ -16,6 +13,9 @@ use anyhow::Result; use async_trait::async_trait; use codex_app_server_protocol::ConfigLayerSource; use codex_config::CONFIG_TOML_FILE; +use codex_config::permissions_toml::NetworkToml; +use codex_config::permissions_toml::PermissionsToml; +use codex_config::permissions_toml::overlay_network_domain_permissions; use codex_network_proxy::ConfigReloader; use codex_network_proxy::ConfigState; use codex_network_proxy::NetworkProxyConfig; @@ -88,10 +88,12 @@ fn collect_layer_mtimes(stack: &ConfigLayerStack) -> Vec { let path = match &layer.name { ConfigLayerSource::System { file } => Some(file.as_path().to_path_buf()), ConfigLayerSource::User { file } => Some(file.as_path().to_path_buf()), - ConfigLayerSource::Project { dot_codex_folder } => dot_codex_folder - .join(CONFIG_TOML_FILE) - .ok() - .map(|p| p.as_path().to_path_buf()), + ConfigLayerSource::Project { dot_codex_folder } => Some( + dot_codex_folder + .join(CONFIG_TOML_FILE) + .as_path() + .to_path_buf(), + ), ConfigLayerSource::LegacyManagedConfigTomlFromFile { file } => { Some(file.as_path().to_path_buf()) } diff --git a/codex-rs/core/src/otel_init.rs b/codex-rs/core/src/otel_init.rs index c732692d62..4a0e7cd984 100644 --- a/codex-rs/core/src/otel_init.rs +++ b/codex-rs/core/src/otel_init.rs @@ -3,11 +3,11 @@ use codex_config::types::OtelExporterKind as Kind; use codex_config::types::OtelHttpProtocol as Protocol; use codex_features::Feature; use codex_login::default_client::originator; +use codex_otel::OtelExporter; +use codex_otel::OtelHttpProtocol; use codex_otel::OtelProvider; -use codex_otel::config::OtelExporter; -use codex_otel::config::OtelHttpProtocol; -use codex_otel::config::OtelSettings; -use codex_otel::config::OtelTlsConfig as OtelTlsSettings; +use codex_otel::OtelSettings; +use 
codex_otel::OtelTlsConfig as OtelTlsSettings; use std::error::Error; /// Build an OpenTelemetry provider from the app Config. diff --git a/codex-rs/core/src/personality_migration.rs b/codex-rs/core/src/personality_migration.rs index 8a3786e79e..52cabf55de 100644 --- a/codex-rs/core/src/personality_migration.rs +++ b/codex-rs/core/src/personality_migration.rs @@ -1,4 +1,3 @@ -use crate::config::ConfigToml; use crate::config::edit::ConfigEditsBuilder; use crate::rollout::ARCHIVED_SESSIONS_SUBDIR; use crate::rollout::SESSIONS_SUBDIR; @@ -6,6 +5,7 @@ use crate::rollout::list::ThreadListConfig; use crate::rollout::list::ThreadListLayout; use crate::rollout::list::ThreadSortKey; use crate::rollout::list::get_threads_in_root; +use codex_config::config_toml::ConfigToml; use codex_protocol::config_types::Personality; use codex_protocol::protocol::SessionSource; use codex_rollout::state_db; diff --git a/codex-rs/core/src/personality_migration_tests.rs b/codex-rs/core/src/personality_migration_tests.rs index de1070ad34..3c8bb077fb 100644 --- a/codex-rs/core/src/personality_migration_tests.rs +++ b/codex-rs/core/src/personality_migration_tests.rs @@ -43,6 +43,7 @@ async fn write_session_with_user_event(codex_home: &Path) -> io::Result<()> { agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, diff --git a/codex-rs/core/src/plugins/injection.rs b/codex-rs/core/src/plugins/injection.rs index e482010b2e..3e5bb6ecb5 100644 --- a/codex-rs/core/src/plugins/injection.rs +++ b/codex-rs/core/src/plugins/injection.rs @@ -7,8 +7,8 @@ use codex_protocol::models::ResponseItem; use crate::connectors; use crate::plugins::PluginCapabilitySummary; use crate::plugins::render_explicit_plugin_instructions; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::ToolInfo; pub(crate) fn build_plugin_injections( 
mentioned_plugins: &[PluginCapabilitySummary], diff --git a/codex-rs/core/src/plugins/manager.rs b/codex-rs/core/src/plugins/manager.rs index 2b21a23703..1da98b85cb 100644 --- a/codex-rs/core/src/plugins/manager.rs +++ b/codex-rs/core/src/plugins/manager.rs @@ -25,6 +25,7 @@ use super::startup_sync::start_startup_remote_plugin_sync_once; use super::store::PluginInstallResult as StorePluginInstallResult; use super::store::PluginStore; use super::store::PluginStoreError; +use super::store::plugin_version_for_source; use super::sync_openai_plugins_repo; use crate::SkillMetadata; use crate::config::CONFIG_TOML_FILE; @@ -99,6 +100,13 @@ struct CachedFeaturedPluginIds { featured_plugin_ids: Vec, } +#[derive(Default)] +struct NonCuratedCacheRefreshState { + requested_roots: Option>, + last_refreshed_roots: Option>, + in_flight: bool, +} + fn featured_plugin_ids_cache_key( config: &Config, auth: Option<&CodexAuth>, @@ -312,6 +320,7 @@ pub struct PluginsManager { codex_home: PathBuf, store: PluginStore, featured_plugin_ids_cache: RwLock>, + non_curated_cache_refresh_state: RwLock, cached_enabled_outcome: RwLock>, remote_sync_lock: Mutex<()>, restriction_product: Option, @@ -338,6 +347,7 @@ impl PluginsManager { codex_home: codex_home.clone(), store: PluginStore::new(codex_home), featured_plugin_ids_cache: RwLock::new(None), + non_curated_cache_refresh_state: RwLock::new(NonCuratedCacheRefreshState::default()), cached_enabled_outcome: RwLock::new(None), remote_sync_lock: Mutex::new(()), restriction_product, @@ -719,6 +729,7 @@ impl PluginsManager { )); } + let mut missing_remote_plugins = Vec::::new(); let mut remote_installed_plugin_names = HashSet::::new(); for plugin in remote_plugins { if plugin.marketplace_name != marketplace_name { @@ -727,11 +738,7 @@ impl PluginsManager { }); } if !local_plugin_names.contains(&plugin.name) { - warn!( - plugin = plugin.name, - marketplace = %marketplace_name, - "ignoring remote plugin missing from local marketplace during sync" - ); + 
missing_remote_plugins.push(plugin.name); continue; } // For now, sync treats remote `enabled = false` as uninstall rather than a distinct @@ -753,6 +760,19 @@ impl PluginsManager { let mut result = RemotePluginSyncResult::default(); let remote_plugin_count = remote_installed_plugin_names.len(); let local_plugin_count = local_plugins.len(); + if !missing_remote_plugins.is_empty() { + let sample_missing_plugins = missing_remote_plugins + .iter() + .take(10) + .cloned() + .collect::>(); + warn!( + marketplace = %marketplace_name, + missing_remote_plugin_count = missing_remote_plugins.len(), + missing_remote_plugin_examples = ?sample_missing_plugins, + "ignoring remote plugins missing from local marketplace during sync" + ); + } for ( plugin_name, @@ -1034,6 +1054,56 @@ impl PluginsManager { } } + pub fn maybe_start_non_curated_plugin_cache_refresh_for_roots( + self: &Arc, + roots: &[AbsolutePathBuf], + ) { + let mut roots = roots.to_vec(); + roots.sort_unstable_by(|left, right| left.as_path().cmp(right.as_path())); + roots.dedup(); + if roots.is_empty() { + return; + } + + let should_spawn = { + let mut state = match self.non_curated_cache_refresh_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + // Collapse repeated plugin/list requests onto one worker and only queue another pass + // when the requested roots set actually changes. 
+ if state.requested_roots.as_ref() == Some(&roots) + || (!state.in_flight && state.last_refreshed_roots.as_ref() == Some(&roots)) + { + return; + } + state.requested_roots = Some(roots); + if state.in_flight { + false + } else { + state.in_flight = true; + true + } + }; + if !should_spawn { + return; + } + + let manager = Arc::clone(self); + if let Err(err) = std::thread::Builder::new() + .name("plugins-non-curated-cache-refresh".to_string()) + .spawn(move || manager.run_non_curated_plugin_cache_refresh_loop()) + { + let mut state = match self.non_curated_cache_refresh_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + state.in_flight = false; + state.requested_roots = None; + warn!("failed to start non-curated plugin cache refresh task: {err}"); + } + } + fn start_curated_repo_sync(self: &Arc) { if CURATED_REPO_SYNC_STARTED.swap(true, Ordering::SeqCst) { return; @@ -1045,8 +1115,13 @@ impl PluginsManager { .spawn( move || match sync_openai_plugins_repo(codex_home.as_path()) { Ok(curated_plugin_version) => { - let configured_curated_plugin_ids = - configured_curated_plugin_ids_from_codex_home(codex_home.as_path()); + let configured_curated_plugin_ids = curated_plugin_ids_from_config_keys( + configured_plugins_from_codex_home( + codex_home.as_path(), + "failed to read user config while refreshing curated plugin cache", + "failed to parse user config while refreshing curated plugin cache", + ), + ); match refresh_curated_plugin_cache( codex_home.as_path(), &curated_plugin_version, @@ -1076,6 +1151,55 @@ impl PluginsManager { } } + fn run_non_curated_plugin_cache_refresh_loop(self: Arc) { + loop { + let roots = { + let state = match self.non_curated_cache_refresh_state.read() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + state.requested_roots.clone() + }; + + let Some(roots) = roots else { + let mut state = match self.non_curated_cache_refresh_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + 
state.in_flight = false; + return; + }; + + let refreshed = + match refresh_non_curated_plugin_cache(self.codex_home.as_path(), &roots) { + Ok(cache_refreshed) => { + if cache_refreshed { + self.clear_cache(); + } + true + } + Err(err) => { + self.clear_cache(); + warn!("failed to refresh non-curated plugin cache: {err}"); + false + } + }; + + let mut state = match self.non_curated_cache_refresh_state.write() { + Ok(state) => state, + Err(err) => err.into_inner(), + }; + if refreshed { + state.last_refreshed_roots = Some(roots.clone()); + } + if state.requested_roots.as_ref() == Some(&roots) { + state.requested_roots = None; + state.in_flight = false; + return; + } + } + } + fn configured_plugin_states(&self, config: &Config) -> (HashSet, HashSet) { let configured_plugins = configured_plugins_from_stack(&config.config_layer_stack); let installed_plugins = configured_plugins @@ -1308,6 +1432,90 @@ fn refresh_curated_plugin_cache( Ok(cache_refreshed) } +fn refresh_non_curated_plugin_cache( + codex_home: &Path, + additional_roots: &[AbsolutePathBuf], +) -> Result { + let configured_non_curated_plugin_ids = + non_curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( + codex_home, + "failed to read user config while refreshing non-curated plugin cache", + "failed to parse user config while refreshing non-curated plugin cache", + )); + if configured_non_curated_plugin_ids.is_empty() { + return Ok(false); + } + let configured_non_curated_plugin_keys = configured_non_curated_plugin_ids + .iter() + .map(PluginId::as_key) + .collect::>(); + + let store = PluginStore::new(codex_home.to_path_buf()); + let marketplace_outcome = list_marketplaces(additional_roots) + .map_err(|err| format!("failed to discover marketplaces for cache refresh: {err}"))?; + let mut plugin_sources = HashMap::::new(); + + for marketplace in marketplace_outcome.marketplaces { + if marketplace.name == OPENAI_CURATED_MARKETPLACE_NAME { + continue; + } + + for plugin in 
marketplace.plugins { + let plugin_id = + PluginId::new(plugin.name.clone(), marketplace.name.clone()).map_err(|err| { + match err { + PluginIdError::Invalid(message) => { + format!("failed to prepare non-curated plugin cache refresh: {message}") + } + } + })?; + let plugin_key = plugin_id.as_key(); + if !configured_non_curated_plugin_keys.contains(&plugin_key) { + continue; + } + if plugin_sources.contains_key(&plugin_key) { + warn!( + plugin = plugin.name, + marketplace = marketplace.name, + "ignoring duplicate non-curated plugin entry during cache refresh" + ); + continue; + } + + let source_path = match plugin.source { + MarketplacePluginSource::Local { path } => path, + }; + let plugin_version = plugin_version_for_source(source_path.as_path()) + .map_err(|err| format!("failed to read plugin version for {plugin_key}: {err}"))?; + plugin_sources.insert(plugin_key, (source_path, plugin_version)); + } + } + + let mut cache_refreshed = false; + for plugin_id in configured_non_curated_plugin_ids { + let plugin_key = plugin_id.as_key(); + let Some((source_path, plugin_version)) = plugin_sources.get(&plugin_key).cloned() else { + warn!( + plugin = plugin_id.plugin_name, + marketplace = plugin_id.marketplace_name, + "configured non-curated plugin no longer exists in discovered marketplaces during cache refresh" + ); + continue; + }; + + if store.active_plugin_version(&plugin_id).as_deref() == Some(plugin_version.as_str()) { + continue; + } + + store + .install_with_version(source_path, plugin_id.clone(), plugin_version) + .map_err(|err| format!("failed to refresh plugin cache for {plugin_key}: {err}"))?; + cache_refreshed = true; + } + + Ok(cache_refreshed) +} + fn configured_plugins_from_stack( config_layer_stack: &ConfigLayerStack, ) -> HashMap { @@ -1333,42 +1541,22 @@ fn configured_plugins_from_user_config_value( } } -fn configured_curated_plugin_ids( - configured_plugins: HashMap, -) -> Vec { - let mut configured_curated_plugin_ids = configured_plugins - 
.into_keys() - .filter_map(|plugin_key| match PluginId::parse(&plugin_key) { - Ok(plugin_id) if plugin_id.marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME => { - Some(plugin_id) - } - Ok(_) => None, - Err(err) => { - warn!( - plugin_key, - error = %err, - "ignoring invalid configured plugin key during curated sync setup" - ); - None - } - }) - .collect::>(); - configured_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); - configured_curated_plugin_ids -} - -fn configured_curated_plugin_ids_from_codex_home(codex_home: &Path) -> Vec { +fn configured_plugins_from_codex_home( + codex_home: &Path, + read_error_message: &str, + parse_error_message: &str, +) -> HashMap { let config_path = codex_home.join(CONFIG_TOML_FILE); let user_config = match fs::read_to_string(&config_path) { Ok(user_config) => user_config, - Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Vec::new(), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return HashMap::new(), Err(err) => { warn!( path = %config_path.display(), error = %err, - "failed to read user config while refreshing curated plugin cache" + "{read_error_message}" ); - return Vec::new(); + return HashMap::new(); } }; @@ -1378,13 +1566,61 @@ fn configured_curated_plugin_ids_from_codex_home(codex_home: &Path) -> Vec, + invalid_plugin_key_message: &str, +) -> Vec { + configured_plugins + .into_keys() + .filter_map(|plugin_key| match PluginId::parse(&plugin_key) { + Ok(plugin_id) => Some(plugin_id), + Err(err) => { + warn!( + plugin_key, + error = %err, + "{invalid_plugin_key_message}" + ); + None + } + }) + .collect() +} + +fn curated_plugin_ids_from_config_keys( + configured_plugins: HashMap, +) -> Vec { + let mut configured_curated_plugin_ids = configured_plugin_ids( + configured_plugins, + "ignoring invalid configured plugin key during curated sync setup", + ) + .into_iter() + .filter(|plugin_id| plugin_id.marketplace_name == OPENAI_CURATED_MARKETPLACE_NAME) + .collect::>(); + 
configured_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); + configured_curated_plugin_ids +} + +fn non_curated_plugin_ids_from_config_keys( + configured_plugins: HashMap, +) -> Vec { + let mut configured_non_curated_plugin_ids = configured_plugin_ids( + configured_plugins, + "ignoring invalid plugin key during non-curated cache refresh setup", + ) + .into_iter() + .filter(|plugin_id| plugin_id.marketplace_name != OPENAI_CURATED_MARKETPLACE_NAME) + .collect::>(); + configured_non_curated_plugin_ids.sort_unstable_by_key(PluginId::as_key); + configured_non_curated_plugin_ids } fn load_plugin( diff --git a/codex-rs/core/src/plugins/manager_tests.rs b/codex-rs/core/src/plugins/manager_tests.rs index a9f455bbc4..2f1d3ae9c6 100644 --- a/codex-rs/core/src/plugins/manager_tests.rs +++ b/codex-rs/core/src/plugins/manager_tests.rs @@ -30,19 +30,36 @@ use wiremock::matchers::query_param; const MAX_CAPABILITY_SUMMARY_DESCRIPTION_LEN: usize = 1024; -fn write_plugin(root: &Path, dir_name: &str, manifest_name: &str) { +fn write_plugin_with_version( + root: &Path, + dir_name: &str, + manifest_name: &str, + manifest_version: Option<&str>, +) { let plugin_root = root.join(dir_name); fs::create_dir_all(plugin_root.join(".codex-plugin")).unwrap(); fs::create_dir_all(plugin_root.join("skills")).unwrap(); + let version = manifest_version + .map(|manifest_version| format!(r#","version":"{manifest_version}""#)) + .unwrap_or_default(); fs::write( plugin_root.join(".codex-plugin/plugin.json"), - format!(r#"{{"name":"{manifest_name}"}}"#), + format!(r#"{{"name":"{manifest_name}"{version}}}"#), ) .unwrap(); fs::write(plugin_root.join("skills/SKILL.md"), "skill").unwrap(); fs::write(plugin_root.join(".mcp.json"), r#"{"mcpServers":{}}"#).unwrap(); } +fn write_plugin(root: &Path, dir_name: &str, manifest_name: &str) { + write_plugin_with_version( + root, + dir_name, + manifest_name, + /*manifest_version*/ None, + ); +} + fn plugin_config_toml(enabled: bool, plugins_feature_enabled: 
bool) -> String { let mut root = toml::map::Map::new(); @@ -955,6 +972,60 @@ async fn install_plugin_updates_config_with_relative_path_and_plugin_key() { assert!(config.contains("enabled = true")); } +#[tokio::test] +async fn install_plugin_uses_manifest_version_for_non_curated_plugins() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin_with_version( + &repo_root, + "sample-plugin", + "sample-plugin", + Some("1.2.3-beta+7"), + ); + fs::write( + repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + } + ] +}"#, + ) + .unwrap(); + + let result = PluginsManager::new(tmp.path().to_path_buf()) + .install_plugin(PluginInstallRequest { + plugin_name: "sample-plugin".to_string(), + marketplace_path: AbsolutePathBuf::try_from( + repo_root.join(".agents/plugins/marketplace.json"), + ) + .unwrap(), + }) + .await + .unwrap(); + + let installed_path = tmp + .path() + .join("plugins/cache/debug/sample-plugin/1.2.3-beta+7"); + assert_eq!( + result, + PluginInstallOutcome { + plugin_id: PluginId::new("sample-plugin".to_string(), "debug".to_string()).unwrap(), + plugin_version: "1.2.3-beta+7".to_string(), + installed_path: AbsolutePathBuf::try_from(installed_path).unwrap(), + auth_policy: MarketplacePluginAuthPolicy::OnInstall, + } + ); +} + #[tokio::test] async fn uninstall_plugin_removes_cache_and_config_entry() { let tmp = tempfile::tempdir().unwrap(); @@ -2208,7 +2279,7 @@ fn refresh_curated_plugin_cache_reinstalls_missing_configured_plugin_with_curren } #[test] -fn configured_curated_plugin_ids_from_codex_home_reads_latest_user_config() { +fn curated_plugin_ids_from_config_keys_reads_latest_codex_home_user_config() { let tmp = tempfile::tempdir().unwrap(); write_file( 
&tmp.path().join(CONFIG_TOML_FILE), @@ -2224,10 +2295,14 @@ enabled = true ); assert_eq!( - configured_curated_plugin_ids_from_codex_home(tmp.path()) - .into_iter() - .map(|plugin_id| plugin_id.as_key()) - .collect::>(), + curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( + tmp.path(), + "failed to read user config while refreshing curated plugin cache", + "failed to parse user config while refreshing curated plugin cache", + )) + .into_iter() + .map(|plugin_id| plugin_id.as_key()) + .collect::>(), vec!["slack@openai-curated".to_string()] ); @@ -2239,7 +2314,11 @@ plugins = true ); assert_eq!( - configured_curated_plugin_ids_from_codex_home(tmp.path()), + curated_plugin_ids_from_config_keys(configured_plugins_from_codex_home( + tmp.path(), + "failed to read user config while refreshing curated plugin cache", + "failed to parse user config while refreshing curated plugin cache", + )), Vec::::new() ); } @@ -2266,6 +2345,212 @@ fn refresh_curated_plugin_cache_returns_false_when_configured_plugins_are_curren ); } +#[test] +fn refresh_non_curated_plugin_cache_replaces_existing_local_version_with_manifest_version() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin_with_version(&repo_root, "sample-plugin", "sample-plugin", Some("1.2.3")); + write_file( + &repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + } + ] +}"#, + ); + write_plugin( + &tmp.path().join("plugins/cache/debug"), + "sample-plugin/local", + "sample-plugin", + ); + write_file( + &tmp.path().join(CONFIG_TOML_FILE), + r#"[features] +plugins = true + +[plugins."sample-plugin@debug"] +enabled = true +"#, + ); + + assert!( + refresh_non_curated_plugin_cache( + tmp.path(), + 
&[AbsolutePathBuf::try_from(repo_root).unwrap()], + ) + .expect("cache refresh should succeed") + ); + + assert!( + !tmp.path() + .join("plugins/cache/debug/sample-plugin/local") + .exists() + ); + assert!( + tmp.path() + .join("plugins/cache/debug/sample-plugin/1.2.3") + .is_dir() + ); +} + +#[test] +fn refresh_non_curated_plugin_cache_reinstalls_missing_configured_plugin_with_manifest_version() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin_with_version(&repo_root, "sample-plugin", "sample-plugin", Some("1.2.3")); + write_file( + &repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + } + ] +}"#, + ); + write_file( + &tmp.path().join(CONFIG_TOML_FILE), + r#"[features] +plugins = true + +[plugins."sample-plugin@debug"] +enabled = true +"#, + ); + + assert!( + refresh_non_curated_plugin_cache( + tmp.path(), + &[AbsolutePathBuf::try_from(repo_root).unwrap()], + ) + .expect("cache refresh should reinstall missing configured plugin") + ); + + assert!( + tmp.path() + .join("plugins/cache/debug/sample-plugin/1.2.3") + .is_dir() + ); +} + +#[test] +fn refresh_non_curated_plugin_cache_returns_false_when_configured_plugins_are_current() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin_with_version(&repo_root, "sample-plugin", "sample-plugin", Some("1.2.3")); + write_file( + &repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + } + ] +}"#, + ); + 
write_plugin_with_version( + &tmp.path().join("plugins/cache/debug"), + "sample-plugin/1.2.3", + "sample-plugin", + Some("1.2.3"), + ); + write_file( + &tmp.path().join(CONFIG_TOML_FILE), + r#"[features] +plugins = true + +[plugins."sample-plugin@debug"] +enabled = true +"#, + ); + + assert!( + !refresh_non_curated_plugin_cache( + tmp.path(), + &[AbsolutePathBuf::try_from(repo_root).unwrap()], + ) + .expect("cache refresh should be a no-op when configured plugins are current") + ); +} + +#[test] +fn refresh_non_curated_plugin_cache_ignores_invalid_unconfigured_plugin_versions() { + let tmp = tempfile::tempdir().unwrap(); + let repo_root = tmp.path().join("repo"); + fs::create_dir_all(repo_root.join(".git")).unwrap(); + fs::create_dir_all(repo_root.join(".agents/plugins")).unwrap(); + write_plugin_with_version(&repo_root, "sample-plugin", "sample-plugin", Some("1.2.3")); + write_plugin_with_version(&repo_root, "broken-plugin", "broken-plugin", Some(" ")); + write_file( + &repo_root.join(".agents/plugins/marketplace.json"), + r#"{ + "name": "debug", + "plugins": [ + { + "name": "sample-plugin", + "source": { + "source": "local", + "path": "./sample-plugin" + } + }, + { + "name": "broken-plugin", + "source": { + "source": "local", + "path": "./broken-plugin" + } + } + ] +}"#, + ); + write_file( + &tmp.path().join(CONFIG_TOML_FILE), + r#"[features] +plugins = true + +[plugins."sample-plugin@debug"] +enabled = true +"#, + ); + + assert!( + refresh_non_curated_plugin_cache( + tmp.path(), + &[AbsolutePathBuf::try_from(repo_root).unwrap()], + ) + .expect("cache refresh should ignore unrelated invalid plugin manifests") + ); + + assert!( + tmp.path() + .join("plugins/cache/debug/sample-plugin/1.2.3") + .is_dir() + ); +} + #[test] fn load_plugins_ignores_project_config_files() { let codex_home = TempDir::new().unwrap(); diff --git a/codex-rs/core/src/plugins/manifest.rs b/codex-rs/core/src/plugins/manifest.rs index 8fbacb4565..f1253441bd 100644 --- 
a/codex-rs/core/src/plugins/manifest.rs +++ b/codex-rs/core/src/plugins/manifest.rs @@ -14,6 +14,8 @@ struct RawPluginManifest { #[serde(default)] name: String, #[serde(default)] + version: Option, + #[serde(default)] description: Option, // Keep manifest paths as raw strings so we can validate the required `./...` syntax before // resolving them under the plugin root. @@ -30,6 +32,7 @@ struct RawPluginManifest { #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct PluginManifest { pub(crate) name: String, + pub(crate) version: Option, pub(crate) description: Option, pub(crate) paths: PluginManifestPaths, pub(crate) interface: Option, @@ -121,6 +124,7 @@ pub(crate) fn load_plugin_manifest(plugin_root: &Path) -> Option Ok(manifest) => { let RawPluginManifest { name: raw_name, + version, description, skills, mcp_servers, @@ -133,6 +137,10 @@ pub(crate) fn load_plugin_manifest(plugin_root: &Path) -> Option .filter(|_| raw_name.trim().is_empty()) .unwrap_or(&raw_name) .to_string(); + let version = version.and_then(|version| { + let version = version.trim(); + (!version.is_empty()).then(|| version.to_string()) + }); let interface = interface.and_then(|interface| { let RawPluginManifestInterface { display_name, @@ -204,6 +212,7 @@ pub(crate) fn load_plugin_manifest(plugin_root: &Path) -> Option }); Some(PluginManifest { name, + version, description, paths: PluginManifestPaths { skills: resolve_manifest_path(plugin_root, "skills", skills.as_deref()), @@ -381,13 +390,17 @@ mod tests { use std::path::Path; use tempfile::tempdir; - fn write_manifest(plugin_root: &Path, interface: &str) { + fn write_manifest(plugin_root: &Path, version: Option<&str>, interface: &str) { fs::create_dir_all(plugin_root.join(".codex-plugin")).expect("create manifest dir"); + let version = version + .map(|version| format!(" \"version\": \"{version}\",\n")) + .unwrap_or_default(); fs::write( plugin_root.join(".codex-plugin/plugin.json"), format!( r#"{{ "name": "demo-plugin", +{version} 
"interface": {interface} }}"# ), @@ -405,6 +418,7 @@ mod tests { let plugin_root = tmp.path().join("demo-plugin"); write_manifest( &plugin_root, + /*version*/ None, r#"{ "displayName": "Demo Plugin", "defaultPrompt": " Summarize my inbox " @@ -427,6 +441,7 @@ mod tests { let too_long = "x".repeat(MAX_DEFAULT_PROMPT_LEN + 1); write_manifest( &plugin_root, + /*version*/ None, &format!( r#"{{ "displayName": "Demo Plugin", @@ -462,6 +477,7 @@ mod tests { let plugin_root = tmp.path().join("demo-plugin"); write_manifest( &plugin_root, + /*version*/ None, r#"{ "displayName": "Demo Plugin", "defaultPrompt": { "text": "Summarize my inbox" } @@ -473,4 +489,21 @@ mod tests { assert_eq!(interface.default_prompt, None); } + + #[test] + fn plugin_manifest_reads_trimmed_version() { + let tmp = tempdir().expect("tempdir"); + let plugin_root = tmp.path().join("demo-plugin"); + write_manifest( + &plugin_root, + Some(" 1.2.3-beta+7 "), + r#"{ + "displayName": "Demo Plugin" + }"#, + ); + + let manifest = load_manifest(&plugin_root); + + assert_eq!(manifest.version, Some("1.2.3-beta+7".to_string())); + } } diff --git a/codex-rs/core/src/plugins/marketplace.rs b/codex-rs/core/src/plugins/marketplace.rs index ee775262aa..0d7c68c1e5 100644 --- a/codex-rs/core/src/plugins/marketplace.rs +++ b/codex-rs/core/src/plugins/marketplace.rs @@ -299,20 +299,18 @@ fn discover_marketplace_paths_from_roots( for root in additional_roots { // Curated marketplaces can now come from an HTTP-downloaded directory that is not a git // checkout, so check the root directly before falling back to repo-root discovery. 
- if let Ok(path) = root.join(MARKETPLACE_RELATIVE_PATH) - && path.as_path().is_file() - && !paths.contains(&path) - { + let path = root.join(MARKETPLACE_RELATIVE_PATH); + if path.as_path().is_file() && !paths.contains(&path) { paths.push(path); continue; } if let Some(repo_root) = get_git_repo_root(root.as_path()) && let Ok(repo_root) = AbsolutePathBuf::try_from(repo_root) - && let Ok(path) = repo_root.join(MARKETPLACE_RELATIVE_PATH) - && path.as_path().is_file() - && !paths.contains(&path) { - paths.push(path); + let path = repo_root.join(MARKETPLACE_RELATIVE_PATH); + if path.as_path().is_file() && !paths.contains(&path) { + paths.push(path); + } } } @@ -370,12 +368,7 @@ fn resolve_plugin_source_path( // `marketplace.json` lives under `/.agents/plugins/`, but local plugin paths // are resolved relative to ``, not relative to the `plugins/` directory. - marketplace_root_dir(marketplace_path)? - .join(relative_source_path) - .map_err(|err| MarketplaceError::InvalidMarketplaceFile { - path: marketplace_path.to_path_buf(), - message: format!("plugin source path must resolve to an absolute path: {err}"), - }) + Ok(marketplace_root_dir(marketplace_path)?.join(relative_source_path)) } } } diff --git a/codex-rs/core/src/plugins/marketplace_tests.rs b/codex-rs/core/src/plugins/marketplace_tests.rs index d3cf55425e..226ae3ff6e 100644 --- a/codex-rs/core/src/plugins/marketplace_tests.rs +++ b/codex-rs/core/src/plugins/marketplace_tests.rs @@ -756,18 +756,16 @@ fn resolve_marketplace_plugin_rejects_non_relative_local_paths() { ) .unwrap(); - let err = resolve_marketplace_plugin( - &AbsolutePathBuf::try_from(repo_root.join(".agents/plugins/marketplace.json")).unwrap(), - "local-plugin", - Some(Product::Codex), - ) - .unwrap_err(); + let marketplace_path = + AbsolutePathBuf::try_from(repo_root.join(".agents/plugins/marketplace.json")).unwrap(); + let err = resolve_marketplace_plugin(&marketplace_path, "local-plugin", Some(Product::Codex)) + .unwrap_err(); assert_eq!( 
err.to_string(), format!( "invalid marketplace file `{}`: local plugin source path must start with `./`", - repo_root.join(".agents/plugins/marketplace.json").display() + marketplace_path.display() ) ); } diff --git a/codex-rs/core/src/plugins/startup_sync.rs b/codex-rs/core/src/plugins/startup_sync.rs index 6dcf18f54f..5bca32de13 100644 --- a/codex-rs/core/src/plugins/startup_sync.rs +++ b/codex-rs/core/src/plugins/startup_sync.rs @@ -6,8 +6,8 @@ use std::process::Stdio; use std::sync::Arc; use std::time::Duration; -use codex_otel::metrics::names::CURATED_PLUGINS_STARTUP_SYNC_FINAL_METRIC; -use codex_otel::metrics::names::CURATED_PLUGINS_STARTUP_SYNC_METRIC; +use codex_otel::CURATED_PLUGINS_STARTUP_SYNC_FINAL_METRIC; +use codex_otel::CURATED_PLUGINS_STARTUP_SYNC_METRIC; use reqwest::Client; use serde::Deserialize; use tempfile::TempDir; @@ -24,16 +24,20 @@ use super::PluginsManager; const GITHUB_API_BASE_URL: &str = "https://api.github.com"; const GITHUB_API_ACCEPT_HEADER: &str = "application/vnd.github+json"; const GITHUB_API_VERSION_HEADER: &str = "2022-11-28"; +const CURATED_PLUGINS_BACKUP_ARCHIVE_API_URL: &str = + "https://chatgpt.com/backend-api/plugins/export/curated"; const OPENAI_PLUGINS_OWNER: &str = "openai"; const OPENAI_PLUGINS_REPO: &str = "plugins"; const CURATED_PLUGINS_RELATIVE_DIR: &str = ".tmp/plugins"; const CURATED_PLUGINS_SHA_FILE: &str = ".tmp/plugins.sha"; +const CURATED_PLUGINS_BACKUP_ARCHIVE_FALLBACK_VERSION: &str = "export-backup"; const CURATED_PLUGINS_GIT_TIMEOUT: Duration = Duration::from_secs(30); const CURATED_PLUGINS_HTTP_TIMEOUT: Duration = Duration::from_secs(30); +const CURATED_PLUGINS_BACKUP_ARCHIVE_TIMEOUT: Duration = Duration::from_secs(30); // Keep this comfortably above a normal sync attempt so we do not race another Codex process. 
const CURATED_PLUGINS_STALE_TEMP_DIR_MAX_AGE: Duration = Duration::from_secs(10 * 60); const STARTUP_REMOTE_PLUGIN_SYNC_MARKER_FILE: &str = ".tmp/app-server-remote-plugin-sync-v1"; -const STARTUP_REMOTE_PLUGIN_SYNC_PREREQUISITE_TIMEOUT: Duration = Duration::from_secs(5); +const STARTUP_REMOTE_PLUGIN_SYNC_PREREQUISITE_TIMEOUT: Duration = Duration::from_secs(10); #[derive(Debug, Deserialize)] struct GitHubRepositorySummary { @@ -50,22 +54,37 @@ struct GitHubGitRefObject { sha: String, } +#[derive(Debug, Deserialize)] +struct CuratedPluginsBackupArchiveResponse { + download_url: String, +} + pub(crate) fn curated_plugins_repo_path(codex_home: &Path) -> PathBuf { codex_home.join(CURATED_PLUGINS_RELATIVE_DIR) } pub(crate) fn read_curated_plugins_sha(codex_home: &Path) -> Option { - read_sha_file(codex_home.join(CURATED_PLUGINS_SHA_FILE).as_path()) + read_sha_file(curated_plugins_sha_path(codex_home).as_path()) +} + +fn curated_plugins_sha_path(codex_home: &Path) -> PathBuf { + codex_home.join(CURATED_PLUGINS_SHA_FILE) } pub(crate) fn sync_openai_plugins_repo(codex_home: &Path) -> Result { - sync_openai_plugins_repo_with_transport_overrides(codex_home, "git", GITHUB_API_BASE_URL) + sync_openai_plugins_repo_with_transport_overrides( + codex_home, + "git", + GITHUB_API_BASE_URL, + CURATED_PLUGINS_BACKUP_ARCHIVE_API_URL, + ) } fn sync_openai_plugins_repo_with_transport_overrides( codex_home: &Path, git_binary: &str, api_base_url: &str, + backup_archive_api_url: &str, ) -> Result { match sync_openai_plugins_repo_via_git(codex_home, git_binary) { Ok(remote_sha) => { @@ -80,11 +99,46 @@ fn sync_openai_plugins_repo_with_transport_overrides( git_binary, "git sync failed for curated plugin sync; falling back to GitHub HTTP" ); - let result = sync_openai_plugins_repo_via_http(codex_home, api_base_url); - let status = if result.is_ok() { "success" } else { "failure" }; - emit_curated_plugins_startup_sync_metric("http", status); - 
emit_curated_plugins_startup_sync_final_metric("http", status); - result + match sync_openai_plugins_repo_via_http(codex_home, api_base_url) { + Ok(remote_sha) => { + emit_curated_plugins_startup_sync_metric("http", "success"); + emit_curated_plugins_startup_sync_final_metric("http", "success"); + Ok(remote_sha) + } + Err(http_err) => { + emit_curated_plugins_startup_sync_metric("http", "failure"); + if has_local_curated_plugins_snapshot(codex_home) { + emit_curated_plugins_startup_sync_final_metric("http", "failure"); + warn!( + error = %http_err, + "GitHub HTTP sync failed for curated plugin sync; skipping export archive fallback because a local curated plugins snapshot already exists" + ); + Err(format!( + "git sync failed for curated plugin sync: {err}; GitHub HTTP sync failed for curated plugin sync: {http_err}; export archive fallback skipped because a local curated plugins snapshot already exists" + )) + } else { + // The export archive is a lagging backup path. Only use it to bootstrap a + // missing local curated snapshot, never to refresh an existing one. 
+ warn!( + error = %http_err, + backup_archive_api_url, + "GitHub HTTP sync failed for curated plugin sync; falling back to export archive" + ); + let result = sync_openai_plugins_repo_via_backup_archive( + codex_home, + backup_archive_api_url, + ); + let status = if result.is_ok() { "success" } else { "failure" }; + emit_curated_plugins_startup_sync_metric("export_archive", status); + emit_curated_plugins_startup_sync_final_metric("export_archive", status); + result.map_err(|export_err| { + format!( + "git sync failed for curated plugin sync: {err}; GitHub HTTP sync failed for curated plugin sync: {http_err}; export archive sync failed for curated plugin sync: {export_err}" + ) + }) + } + } + } } } } @@ -152,6 +206,29 @@ fn sync_openai_plugins_repo_via_http( Ok(remote_sha) } +fn sync_openai_plugins_repo_via_backup_archive( + codex_home: &Path, + backup_archive_api_url: &str, +) -> Result { + let repo_path = curated_plugins_repo_path(codex_home); + let sha_path = curated_plugins_sha_path(codex_home); + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .map_err(|err| format!("failed to create curated plugins sync runtime: {err}"))?; + let staged_repo_dir = prepare_curated_repo_parent_and_temp_dir(&repo_path)?; + let zipball_bytes = runtime.block_on(fetch_curated_repo_backup_archive_zip( + backup_archive_api_url, + ))?; + extract_zipball_to_dir(&zipball_bytes, staged_repo_dir.path())?; + ensure_marketplace_manifest_exists(staged_repo_dir.path())?; + let export_version = read_extracted_backup_archive_git_sha(staged_repo_dir.path())? 
+ .unwrap_or_else(|| CURATED_PLUGINS_BACKUP_ARCHIVE_FALLBACK_VERSION.to_string()); + activate_curated_repo(&repo_path, staged_repo_dir)?; + write_curated_plugins_sha(&sha_path, &export_version)?; + Ok(export_version) +} + pub(super) fn start_startup_remote_plugin_sync_once( manager: Arc, codex_home: PathBuf, @@ -213,17 +290,17 @@ fn startup_remote_plugin_sync_marker_path(codex_home: &Path) -> PathBuf { codex_home.join(STARTUP_REMOTE_PLUGIN_SYNC_MARKER_FILE) } -fn startup_remote_plugin_sync_prerequisites_ready(codex_home: &Path) -> bool { - codex_home - .join(".tmp/plugins/.agents/plugins/marketplace.json") +fn has_local_curated_plugins_snapshot(codex_home: &Path) -> bool { + curated_plugins_repo_path(codex_home) + .join(".agents/plugins/marketplace.json") .is_file() - && codex_home.join(".tmp/plugins.sha").is_file() + && codex_home.join(CURATED_PLUGINS_SHA_FILE).is_file() } async fn wait_for_startup_remote_plugin_sync_prerequisites(codex_home: &Path) -> bool { let deadline = tokio::time::Instant::now() + STARTUP_REMOTE_PLUGIN_SYNC_PREREQUISITE_TIMEOUT; loop { - if startup_remote_plugin_sync_prerequisites_ready(codex_home) { + if has_local_curated_plugins_snapshot(codex_home) { return true; } if tokio::time::Instant::now() >= deadline { @@ -374,7 +451,7 @@ fn emit_curated_plugins_startup_sync_counter( transport: &'static str, status: &'static str, ) { - let Some(metrics) = codex_otel::metrics::global() else { + let Some(metrics) = codex_otel::global() else { return; }; let tags = [("transport", transport), ("status", status)]; @@ -641,6 +718,126 @@ async fn fetch_curated_repo_zipball( fetch_github_bytes(&client, &zipball_url, "download curated plugins archive").await } +async fn fetch_curated_repo_backup_archive_zip( + backup_archive_api_url: &str, +) -> Result, String> { + let client = build_reqwest_client(); + let export_body = fetch_public_text( + &client, + backup_archive_api_url, + "get curated plugins export archive metadata", + ) + .await?; + let 
export_response: CuratedPluginsBackupArchiveResponse = serde_json::from_str(&export_body) + .map_err(|err| { + format!( + "failed to parse curated plugins backup archive response from {backup_archive_api_url}: {err}" + ) + })?; + if export_response.download_url.is_empty() { + return Err(format!( + "curated plugins backup archive response from {backup_archive_api_url} did not include a download URL" + )); + } + + fetch_public_bytes( + &client, + &export_response.download_url, + "download curated plugins export archive", + ) + .await +} + +fn read_extracted_backup_archive_git_sha(repo_path: &Path) -> Result, String> { + let git_dir = repo_path.join(".git"); + if !git_dir.is_dir() { + return Ok(None); + } + + let head_path = git_dir.join("HEAD"); + let head = std::fs::read_to_string(&head_path).map_err(|err| { + format!( + "failed to read curated plugins backup archive git HEAD {}: {err}", + head_path.display() + ) + })?; + let head = head.trim(); + if head.is_empty() { + return Err(format!( + "curated plugins backup archive git HEAD is empty at {}", + head_path.display() + )); + } + + if let Some(reference) = head.strip_prefix("ref: ") { + let reference = validate_backup_archive_git_ref(reference.trim())?; + return read_git_ref_sha(&git_dir, reference).map(Some); + } + + Ok(Some(head.to_string())) +} + +fn validate_backup_archive_git_ref(reference: &str) -> Result<&str, String> { + if !reference.starts_with("refs/") { + return Err(format!( + "curated plugins backup archive git ref must stay under refs/: {reference}" + )); + } + + let path = Path::new(reference); + if path.is_absolute() { + return Err(format!( + "curated plugins backup archive git ref must be relative: {reference}" + )); + } + + for component in path.components() { + match component { + std::path::Component::Normal(_) => {} + _ => { + return Err(format!( + "curated plugins backup archive git ref contains invalid path components: {reference}" + )); + } + } + } + + Ok(reference) +} + +fn 
read_git_ref_sha(git_dir: &Path, reference: &str) -> Result { + let ref_path = git_dir.join(reference); + if let Ok(sha) = std::fs::read_to_string(&ref_path) { + let sha = sha.trim(); + if sha.is_empty() { + return Err(format!( + "curated plugins backup archive git ref {reference} is empty at {}", + ref_path.display() + )); + } + return Ok(sha.to_string()); + } + + let packed_refs_path = git_dir.join("packed-refs"); + if let Ok(packed_refs) = std::fs::read_to_string(&packed_refs_path) + && let Some(sha) = packed_refs.lines().find_map(|line| { + let trimmed = line.trim(); + if trimmed.is_empty() || trimmed.starts_with('#') || trimmed.starts_with('^') { + return None; + } + let (sha, candidate_ref) = trimmed.split_once(' ')?; + (candidate_ref == reference).then_some(sha.to_string()) + }) + { + return Ok(sha); + } + + Err(format!( + "failed to resolve curated plugins backup archive git ref {reference} from {}", + git_dir.display() + )) +} + async fn fetch_github_text(client: &Client, url: &str, context: &str) -> Result { let response = github_request(client, url) .send() @@ -675,6 +872,44 @@ async fn fetch_github_bytes(client: &Client, url: &str, context: &str) -> Result Ok(body.to_vec()) } +async fn fetch_public_text(client: &Client, url: &str, context: &str) -> Result { + let response = client + .get(url) + .timeout(CURATED_PLUGINS_BACKUP_ARCHIVE_TIMEOUT) + .send() + .await + .map_err(|err| format!("failed to {context} from {url}: {err}"))?; + let status = response.status(); + let body = response.text().await.unwrap_or_default(); + if !status.is_success() { + return Err(format!( + "{context} from {url} failed with status {status}: {body}" + )); + } + Ok(body) +} + +async fn fetch_public_bytes(client: &Client, url: &str, context: &str) -> Result, String> { + let response = client + .get(url) + .timeout(CURATED_PLUGINS_BACKUP_ARCHIVE_TIMEOUT) + .send() + .await + .map_err(|err| format!("failed to {context} from {url}: {err}"))?; + let status = response.status(); + let 
body = response + .bytes() + .await + .map_err(|err| format!("failed to read {context} response from {url}: {err}"))?; + if !status.is_success() { + let body_text = String::from_utf8_lossy(&body); + return Err(format!( + "{context} from {url} failed with status {status}: {body_text}" + )); + } + Ok(body.to_vec()) +} + fn github_request(client: &Client, url: &str) -> reqwest::RequestBuilder { client .get(url) diff --git a/codex-rs/core/src/plugins/startup_sync_tests.rs b/codex-rs/core/src/plugins/startup_sync_tests.rs index e1a1d8596f..74dd0d0fdb 100644 --- a/codex-rs/core/src/plugins/startup_sync_tests.rs +++ b/codex-rs/core/src/plugins/startup_sync_tests.rs @@ -8,6 +8,7 @@ use codex_login::CodexAuth; use pretty_assertions::assert_eq; use std::io::Write; use std::path::Path; +use std::path::PathBuf; use tempfile::tempdir; use wiremock::Mock; use wiremock::MockServer; @@ -33,6 +34,112 @@ fn has_plugins_clone_dirs(codex_home: &Path) -> bool { }) } +#[cfg(unix)] +fn write_executable_script(path: &Path, contents: &str) { + #[cfg(unix)] + use std::os::unix::fs::PermissionsExt; + + std::fs::write(path, contents).expect("write script"); + #[cfg(unix)] + { + let mut permissions = std::fs::metadata(path).expect("metadata").permissions(); + permissions.set_mode(0o755); + std::fs::set_permissions(path, permissions).expect("chmod"); + } +} + +async fn mount_github_repo_and_ref(server: &MockServer, sha: &str) { + Mock::given(method("GET")) + .and(path("/repos/openai/plugins")) + .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"default_branch":"main"}"#)) + .mount(server) + .await; + Mock::given(method("GET")) + .and(path("/repos/openai/plugins/git/ref/heads/main")) + .respond_with( + ResponseTemplate::new(200) + .set_body_string(format!(r#"{{"object":{{"sha":"{sha}"}}}}"#)), + ) + .mount(server) + .await; +} + +async fn mount_github_zipball(server: &MockServer, sha: &str, bytes: Vec) { + Mock::given(method("GET")) + 
.and(path(format!("/repos/openai/plugins/zipball/{sha}"))) + .respond_with( + ResponseTemplate::new(200) + .insert_header("content-type", "application/zip") + .set_body_bytes(bytes), + ) + .mount(server) + .await; +} + +async fn mount_export_archive(server: &MockServer, bytes: Vec<u8>) -> String { + let export_api_url = format!("{}/backend-api/plugins/export/curated", server.uri()); + Mock::given(method("GET")) + .and(path("/backend-api/plugins/export/curated")) + .respond_with(ResponseTemplate::new(200).set_body_string(format!( + r#"{{"download_url":"{}/files/curated-plugins.zip"}}"#, + server.uri() + ))) + .mount(server) + .await; + Mock::given(method("GET")) + .and(path("/files/curated-plugins.zip")) + .respond_with( + ResponseTemplate::new(200) + .insert_header("content-type", "application/zip") + .set_body_bytes(bytes), + ) + .mount(server) + .await; + export_api_url +} + +async fn run_sync_with_transport_overrides( + codex_home: PathBuf, + git_binary: impl Into<String>, + api_base_url: impl Into<String>, + backup_archive_api_url: impl Into<String>, +) -> Result<String, String> { + let git_binary = git_binary.into(); + let api_base_url = api_base_url.into(); + let backup_archive_api_url = backup_archive_api_url.into(); + tokio::task::spawn_blocking(move || { + sync_openai_plugins_repo_with_transport_overrides( + codex_home.as_path(), + &git_binary, + &api_base_url, + &backup_archive_api_url, + ) + }) + .await + .expect("sync task should join") +} + +async fn run_http_sync( + codex_home: PathBuf, + api_base_url: impl Into<String>, +) -> Result<String, String> { + let api_base_url = api_base_url.into(); + tokio::task::spawn_blocking(move || { + sync_openai_plugins_repo_via_http(codex_home.as_path(), &api_base_url) + }) + .await + .expect("sync task should join") +} + +fn assert_curated_gmail_repo(repo_path: &Path) { + assert!(repo_path.join(".agents/plugins/marketplace.json").is_file()); + assert!( + repo_path + .join("plugins/gmail/.codex-plugin/plugin.json") + .is_file() + ); +} + #[test] fn 
curated_plugins_repo_path_uses_codex_home_tmp_dir() { let tmp = tempdir().expect("tempdir"); @@ -100,8 +207,6 @@ fn remove_stale_curated_repo_temp_dirs_removes_only_matching_directories() { #[cfg(unix)] #[test] fn sync_openai_plugins_repo_prefers_git_when_available() { - use std::os::unix::fs::PermissionsExt; - let tmp = tempdir().expect("tempdir"); let bin_dir = tempfile::Builder::new() .prefix("fake-git-") @@ -110,9 +215,9 @@ fn sync_openai_plugins_repo_prefers_git_when_available() { let git_path = bin_dir.path().join("git"); let sha = "0123456789abcdef0123456789abcdef01234567"; - std::fs::write( + write_executable_script( &git_path, - format!( + &format!( r#"#!/bin/sh if [ "$1" = "ls-remote" ]; then printf '%s\tHEAD\n' "{sha}" @@ -135,89 +240,164 @@ echo "unexpected git invocation: $@" >&2 exit 1 "# ), - ) - .expect("write fake git"); - let mut permissions = std::fs::metadata(&git_path) - .expect("metadata") - .permissions(); - permissions.set_mode(0o755); - std::fs::set_permissions(&git_path, permissions).expect("chmod"); + ); let synced_sha = sync_openai_plugins_repo_with_transport_overrides( tmp.path(), git_path.to_str().expect("utf8 path"), "http://127.0.0.1:9", + "http://127.0.0.1:9/backend-api/plugins/export/curated", ) .expect("git sync should succeed"); assert_eq!(synced_sha, sha); - assert!(curated_plugins_repo_path(tmp.path()).join(".git").is_dir()); - assert!( - curated_plugins_repo_path(tmp.path()) - .join(".agents/plugins/marketplace.json") - .is_file() - ); + let repo_path = curated_plugins_repo_path(tmp.path()); + assert!(repo_path.join(".git").is_dir()); + assert_curated_gmail_repo(&repo_path); assert_eq!(read_curated_plugins_sha(tmp.path()).as_deref(), Some(sha)); } +#[cfg(unix)] +#[test] +fn sync_openai_plugins_repo_via_git_succeeds_with_local_rewritten_remote() { + let tmp = tempdir().expect("tempdir"); + let repo_root = tempfile::Builder::new() + .prefix("curated-repo-success-") + .tempdir() + .expect("tempdir"); + let work_repo = 
repo_root.path().join("work/plugins"); + let remote_repo = repo_root.path().join("remotes/openai/plugins.git"); + std::fs::create_dir_all(work_repo.join(".agents/plugins")).expect("create marketplace dir"); + std::fs::create_dir_all(work_repo.join("plugins/gmail/.codex-plugin")) + .expect("create plugin dir"); + std::fs::write( + work_repo.join(".agents/plugins/marketplace.json"), + r#"{"name":"openai-curated","plugins":[{"name":"gmail","source":{"source":"local","path":"./plugins/gmail"}}]}"#, + ) + .expect("write marketplace"); + std::fs::write( + work_repo.join("plugins/gmail/.codex-plugin/plugin.json"), + r#"{"name":"gmail"}"#, + ) + .expect("write plugin manifest"); + + let init_status = Command::new("git") + .arg("-C") + .arg(&work_repo) + .arg("init") + .status() + .expect("run git init"); + assert!(init_status.success()); + + let add_status = Command::new("git") + .arg("-C") + .arg(&work_repo) + .arg("add") + .arg(".") + .status() + .expect("run git add"); + assert!(add_status.success()); + + let commit_status = Command::new("git") + .arg("-C") + .arg(&work_repo) + .arg("-c") + .arg("user.name=Codex Test") + .arg("-c") + .arg("user.email=codex@example.com") + .arg("commit") + .arg("-m") + .arg("init") + .status() + .expect("run git commit"); + assert!(commit_status.success()); + + std::fs::create_dir_all(remote_repo.parent().expect("remote parent")) + .expect("create remote parent"); + let clone_status = Command::new("git") + .arg("clone") + .arg("--bare") + .arg(&work_repo) + .arg(&remote_repo) + .status() + .expect("run git clone --bare"); + assert!(clone_status.success()); + + let sha_output = Command::new("git") + .arg("-C") + .arg(&work_repo) + .arg("rev-parse") + .arg("HEAD") + .output() + .expect("run git rev-parse"); + assert!(sha_output.status.success()); + let sha = String::from_utf8_lossy(&sha_output.stdout) + .trim() + .to_string(); + + let git_config_path = repo_root.path().join("git-rewrite.conf"); + std::fs::write( + &git_config_path, + 
format!( + "[url \"file://{}/\"]\n insteadOf = https://github.com/\n", + repo_root.path().join("remotes").display() + ), + ) + .expect("write git config"); + + let bin_dir = tempfile::Builder::new() + .prefix("git-rewrite-wrapper-") + .tempdir() + .expect("tempdir"); + let git_wrapper = bin_dir.path().join("git"); + write_executable_script( + &git_wrapper, + &format!( + "#!/bin/sh\nGIT_CONFIG_GLOBAL='{}' exec git \"$@\"\n", + git_config_path.display() + ), + ); + + let synced_sha = + sync_openai_plugins_repo_via_git(tmp.path(), git_wrapper.to_str().expect("utf8 path")) + .expect("git sync should succeed"); + + assert_eq!(synced_sha, sha); + assert_curated_gmail_repo(&curated_plugins_repo_path(tmp.path())); + assert_eq!( + read_curated_plugins_sha(tmp.path()).as_deref(), + Some(sha.as_str()) + ); + assert!(!has_plugins_clone_dirs(tmp.path())); +} + #[tokio::test] async fn sync_openai_plugins_repo_falls_back_to_http_when_git_is_unavailable() { let tmp = tempdir().expect("tempdir"); let server = MockServer::start().await; let sha = "0123456789abcdef0123456789abcdef01234567"; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins")) - .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"default_branch":"main"}"#)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins/git/ref/heads/main")) - .respond_with( - ResponseTemplate::new(200) - .set_body_string(format!(r#"{{"object":{{"sha":"{sha}"}}}}"#)), - ) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path(format!("/repos/openai/plugins/zipball/{sha}"))) - .respond_with( - ResponseTemplate::new(200) - .insert_header("content-type", "application/zip") - .set_body_bytes(curated_repo_zipball_bytes(sha)), - ) - .mount(&server) - .await; + mount_github_repo_and_ref(&server, sha).await; + mount_github_zipball(&server, sha, curated_repo_zipball_bytes(sha)).await; - let server_uri = server.uri(); - let tmp_path = tmp.path().to_path_buf(); - let synced_sha = 
tokio::task::spawn_blocking(move || { - sync_openai_plugins_repo_with_transport_overrides( - tmp_path.as_path(), - "missing-git-for-test", - &server_uri, - ) - }) + let synced_sha = run_sync_with_transport_overrides( + tmp.path().to_path_buf(), + "missing-git-for-test", + server.uri(), + "http://127.0.0.1:9/backend-api/plugins/export/curated", + ) .await - .expect("sync task should join") .expect("fallback sync should succeed"); let repo_path = curated_plugins_repo_path(tmp.path()); assert_eq!(synced_sha, sha); - assert!(repo_path.join(".agents/plugins/marketplace.json").is_file()); - assert!( - repo_path - .join("plugins/gmail/.codex-plugin/plugin.json") - .is_file() - ); + assert_curated_gmail_repo(&repo_path); assert_eq!(read_curated_plugins_sha(tmp.path()).as_deref(), Some(sha)); } #[cfg(unix)] #[tokio::test] async fn sync_openai_plugins_repo_falls_back_to_http_when_git_sync_fails() { - use std::os::unix::fs::PermissionsExt; - let tmp = tempdir().expect("tempdir"); let bin_dir = tempfile::Builder::new() .prefix("fake-git-fail-") @@ -226,73 +406,36 @@ async fn sync_openai_plugins_repo_falls_back_to_http_when_git_sync_fails() { let git_path = bin_dir.path().join("git"); let sha = "0123456789abcdef0123456789abcdef01234567"; - std::fs::write( + write_executable_script( &git_path, r#"#!/bin/sh echo "simulated git failure" >&2 exit 1 "#, - ) - .expect("write fake git"); - let mut permissions = std::fs::metadata(&git_path) - .expect("metadata") - .permissions(); - permissions.set_mode(0o755); - std::fs::set_permissions(&git_path, permissions).expect("chmod"); + ); let server = MockServer::start().await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins")) - .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"default_branch":"main"}"#)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins/git/ref/heads/main")) - .respond_with( - ResponseTemplate::new(200) - 
.set_body_string(format!(r#"{{"object":{{"sha":"{sha}"}}}}"#)), - ) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path(format!("/repos/openai/plugins/zipball/{sha}"))) - .respond_with( - ResponseTemplate::new(200) - .insert_header("content-type", "application/zip") - .set_body_bytes(curated_repo_zipball_bytes(sha)), - ) - .mount(&server) - .await; + mount_github_repo_and_ref(&server, sha).await; + mount_github_zipball(&server, sha, curated_repo_zipball_bytes(sha)).await; - let server_uri = server.uri(); - let tmp_path = tmp.path().to_path_buf(); - let synced_sha = tokio::task::spawn_blocking(move || { - sync_openai_plugins_repo_with_transport_overrides( - tmp_path.as_path(), - git_path.to_str().expect("utf8 path"), - &server_uri, - ) - }) + let synced_sha = run_sync_with_transport_overrides( + tmp.path().to_path_buf(), + git_path.to_str().expect("utf8 path"), + server.uri(), + "http://127.0.0.1:9/backend-api/plugins/export/curated", + ) .await - .expect("sync task should join") .expect("fallback sync should succeed"); let repo_path = curated_plugins_repo_path(tmp.path()); assert_eq!(synced_sha, sha); - assert!(repo_path.join(".agents/plugins/marketplace.json").is_file()); - assert!( - repo_path - .join("plugins/gmail/.codex-plugin/plugin.json") - .is_file() - ); + assert_curated_gmail_repo(&repo_path); assert_eq!(read_curated_plugins_sha(tmp.path()).as_deref(), Some(sha)); } #[cfg(unix)] #[test] fn sync_openai_plugins_repo_via_git_cleans_up_staged_dir_on_clone_failure() { - use std::os::unix::fs::PermissionsExt; - let tmp = tempdir().expect("tempdir"); let bin_dir = tempfile::Builder::new() .prefix("fake-git-partial-fail-") @@ -301,9 +444,9 @@ fn sync_openai_plugins_repo_via_git_cleans_up_staged_dir_on_clone_failure() { let git_path = bin_dir.path().join("git"); let sha = "0123456789abcdef0123456789abcdef01234567"; - std::fs::write( + write_executable_script( &git_path, - format!( + &format!( r#"#!/bin/sh if [ "$1" = "ls-remote" ]; then printf 
'%s\tHEAD\n' "{sha}" @@ -319,13 +462,7 @@ echo "unexpected git invocation: $@" >&2 exit 1 "# ), - ) - .expect("write fake git"); - let mut permissions = std::fs::metadata(&git_path) - .expect("metadata") - .permissions(); - permissions.set_mode(0o755); - std::fs::set_permissions(&git_path, permissions).expect("chmod"); + ); let err = sync_openai_plugins_repo_via_git(tmp.path(), git_path.to_str().expect("utf8 path")) .expect_err("git sync should fail"); @@ -340,37 +477,12 @@ async fn sync_openai_plugins_repo_via_http_cleans_up_staged_dir_on_extract_failu let server = MockServer::start().await; let sha = "0123456789abcdef0123456789abcdef01234567"; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins")) - .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"default_branch":"main"}"#)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins/git/ref/heads/main")) - .respond_with( - ResponseTemplate::new(200) - .set_body_string(format!(r#"{{"object":{{"sha":"{sha}"}}}}"#)), - ) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path(format!("/repos/openai/plugins/zipball/{sha}"))) - .respond_with( - ResponseTemplate::new(200) - .insert_header("content-type", "application/zip") - .set_body_bytes(b"not a zip archive".to_vec()), - ) - .mount(&server) - .await; + mount_github_repo_and_ref(&server, sha).await; + mount_github_zipball(&server, sha, b"not a zip archive".to_vec()).await; - let server_uri = server.uri(); - let tmp_path = tmp.path().to_path_buf(); - let err = tokio::task::spawn_blocking(move || { - sync_openai_plugins_repo_via_http(tmp_path.as_path(), &server_uri) - }) - .await - .expect("sync task should join") - .expect_err("http sync should fail"); + let err = run_http_sync(tmp.path().to_path_buf(), server.uri()) + .await + .expect_err("http sync should fail"); assert!(err.contains("failed to open curated plugins zip archive")); assert!(!has_plugins_clone_dirs(tmp.path())); @@ -391,37 +503,141 
@@ async fn sync_openai_plugins_repo_skips_archive_download_when_sha_matches() { std::fs::write(tmp.path().join(".tmp/plugins.sha"), format!("{sha}\n")).expect("write sha"); let server = MockServer::start().await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins")) - .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"default_branch":"main"}"#)) - .mount(&server) - .await; - Mock::given(method("GET")) - .and(path("/repos/openai/plugins/git/ref/heads/main")) - .respond_with( - ResponseTemplate::new(200) - .set_body_string(format!(r#"{{"object":{{"sha":"{sha}"}}}}"#)), - ) - .mount(&server) - .await; + mount_github_repo_and_ref(&server, sha).await; - let server_uri = server.uri(); - let tmp_path = tmp.path().to_path_buf(); - tokio::task::spawn_blocking(move || { - sync_openai_plugins_repo_with_transport_overrides( - tmp_path.as_path(), - "missing-git-for-test", - &server_uri, - ) - }) + run_sync_with_transport_overrides( + tmp.path().to_path_buf(), + "missing-git-for-test", + server.uri(), + "http://127.0.0.1:9/backend-api/plugins/export/curated", + ) .await - .expect("sync task should join") .expect("sync should succeed"); assert_eq!(read_curated_plugins_sha(tmp.path()).as_deref(), Some(sha)); assert!(repo_path.join(".agents/plugins/marketplace.json").is_file()); } +#[tokio::test] +async fn sync_openai_plugins_repo_falls_back_to_export_archive_when_no_snapshot_exists() { + let tmp = tempdir().expect("tempdir"); + let server = MockServer::start().await; + let export_sha = "1111111111111111111111111111111111111111"; + + Mock::given(method("GET")) + .and(path("/repos/openai/plugins")) + .respond_with(ResponseTemplate::new(500).set_body_string("github repo lookup failed")) + .mount(&server) + .await; + let export_api_url = + mount_export_archive(&server, curated_repo_backup_archive_zip_bytes(export_sha)).await; + + let synced_sha = run_sync_with_transport_overrides( + tmp.path().to_path_buf(), + "missing-git-for-test", + server.uri(), + 
export_api_url, + ) + .await + .expect("export fallback sync should succeed"); + + let repo_path = curated_plugins_repo_path(tmp.path()); + assert_eq!(synced_sha, export_sha); + assert_curated_gmail_repo(&repo_path); + assert_eq!( + read_curated_plugins_sha(tmp.path()).as_deref(), + Some(export_sha) + ); +} + +#[tokio::test] +async fn sync_openai_plugins_repo_skips_export_archive_when_snapshot_exists() { + let tmp = tempdir().expect("tempdir"); + let curated_root = curated_plugins_repo_path(tmp.path()); + write_openai_curated_marketplace(&curated_root, &["linear"]); + write_curated_plugin_sha(tmp.path()); + + let plugin_manifest_path = curated_root.join("plugins/linear/.codex-plugin/plugin.json"); + let original_manifest = + std::fs::read_to_string(&plugin_manifest_path).expect("read existing plugin manifest"); + + let server = MockServer::start().await; + + Mock::given(method("GET")) + .and(path("/repos/openai/plugins")) + .respond_with(ResponseTemplate::new(500).set_body_string("github repo lookup failed")) + .mount(&server) + .await; + let export_api_url = mount_export_archive( + &server, + curated_repo_backup_archive_zip_bytes("2222222222222222222222222222222222222222"), + ) + .await; + + let err = run_sync_with_transport_overrides( + tmp.path().to_path_buf(), + "missing-git-for-test", + server.uri(), + export_api_url, + ) + .await + .expect_err("existing snapshot should suppress export fallback"); + + assert!(err.contains("export archive fallback skipped")); + assert_eq!( + std::fs::read_to_string(&plugin_manifest_path).expect("read plugin manifest after sync"), + original_manifest + ); + assert_eq!( + read_curated_plugins_sha(tmp.path()).as_deref(), + Some(TEST_CURATED_PLUGIN_SHA) + ); +} + +#[test] +fn read_extracted_backup_archive_git_sha_reads_head_ref_from_extracted_repo() { + let tmp = tempdir().expect("tempdir"); + let git_dir = tmp.path().join(".git/refs/heads"); + std::fs::create_dir_all(&git_dir).expect("create git ref dir"); + 
std::fs::write(tmp.path().join(".git/HEAD"), "ref: refs/heads/main\n").expect("write HEAD"); + std::fs::write( + git_dir.join("main"), + "3333333333333333333333333333333333333333\n", + ) + .expect("write main ref"); + + assert_eq!( + read_extracted_backup_archive_git_sha(tmp.path()) + .expect("read extracted backup archive git sha"), + Some("3333333333333333333333333333333333333333".to_string()) + ); +} + +#[test] +fn read_extracted_backup_archive_git_sha_rejects_non_refs_head_target() { + let tmp = tempdir().expect("tempdir"); + std::fs::create_dir_all(tmp.path().join(".git")).expect("create git dir"); + std::fs::write(tmp.path().join(".git/HEAD"), "ref: HEAD\n").expect("write HEAD"); + + let err = read_extracted_backup_archive_git_sha(tmp.path()) + .expect_err("non-refs target should be rejected"); + + assert!(err.contains("must stay under refs/")); +} + +#[test] +fn read_extracted_backup_archive_git_sha_rejects_path_traversal_ref() { + let tmp = tempdir().expect("tempdir"); + std::fs::create_dir_all(tmp.path().join(".git")).expect("create git dir"); + std::fs::write(tmp.path().join(".git/HEAD"), "ref: refs/heads/../../evil\n") + .expect("write HEAD"); + + let err = read_extracted_backup_archive_git_sha(tmp.path()) + .expect_err("path traversal ref should be rejected"); + + assert!(err.contains("invalid path components")); +} + #[tokio::test] async fn startup_remote_plugin_sync_writes_marker_and_reconciles_state() { let tmp = tempdir().expect("tempdir"); @@ -528,3 +744,49 @@ fn curated_repo_zipball_bytes(sha: &str) -> Vec<u8> { writer.finish().expect("finish zip writer").into_inner() } + +fn curated_repo_backup_archive_zip_bytes(sha: &str) -> Vec<u8> { + let cursor = std::io::Cursor::new(Vec::new()); + let mut writer = ZipWriter::new(cursor); + let options = SimpleFileOptions::default(); + + writer + .start_file("plugins/.git/HEAD", options) + .expect("start HEAD entry"); + writer + .write_all(b"ref: refs/heads/main\n") + .expect("write HEAD"); + writer + 
.start_file("plugins/.git/refs/heads/main", options) + .expect("start main ref entry"); + writer + .write_all(format!("{sha}\n").as_bytes()) + .expect("write main ref"); + writer + .start_file("plugins/.agents/plugins/marketplace.json", options) + .expect("start marketplace entry"); + writer + .write_all( + br#"{ + "name": "openai-curated", + "plugins": [ + { + "name": "gmail", + "source": { + "source": "local", + "path": "./plugins/gmail" + } + } + ] +}"#, + ) + .expect("write marketplace"); + writer + .start_file("plugins/plugins/gmail/.codex-plugin/plugin.json", options) + .expect("start plugin manifest entry"); + writer + .write_all(br#"{"name":"gmail"}"#) + .expect("write plugin manifest"); + + writer.finish().expect("finish zip writer").into_inner() +} diff --git a/codex-rs/core/src/plugins/store.rs b/codex-rs/core/src/plugins/store.rs index faa7fc0812..4316f18002 100644 --- a/codex-rs/core/src/plugins/store.rs +++ b/codex-rs/core/src/plugins/store.rs @@ -1,8 +1,11 @@ use super::load_plugin_manifest; +use super::manifest::PluginManifest; use codex_plugin::PluginId; use codex_plugin::validate_plugin_segment; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_plugins::PLUGIN_MANIFEST_PATH; +use serde::Deserialize; +use serde_json::Value as JsonValue; use std::fs; use std::io; use std::path::Path; @@ -62,7 +65,7 @@ impl PluginStore { entry.file_type().ok().filter(std::fs::FileType::is_dir)?; entry.file_name().into_string().ok() }) - .filter(|version| validate_plugin_segment(version, "plugin version").is_ok()) + .filter(|version| validate_plugin_version_segment(version).is_ok()) .collect::>(); discovered_versions.sort_unstable(); if discovered_versions.is_empty() { @@ -91,7 +94,8 @@ impl PluginStore { source_path: AbsolutePathBuf, plugin_id: PluginId, ) -> Result { - self.install_with_version(source_path, plugin_id, DEFAULT_PLUGIN_VERSION.to_string()) + let plugin_version = plugin_version_for_source(source_path.as_path())?; + 
self.install_with_version(source_path, plugin_id, plugin_version) } pub fn install_with_version( @@ -114,8 +118,7 @@ impl PluginStore { plugin_id.plugin_name ))); } - validate_plugin_segment(&plugin_version, "plugin version") - .map_err(PluginStoreError::Invalid)?; + validate_plugin_version_segment(&plugin_version).map_err(PluginStoreError::Invalid)?; let installed_path = self.plugin_root(&plugin_id, &plugin_version); replace_plugin_root_atomically( source_path.as_path(), @@ -154,7 +157,33 @@ impl PluginStoreError { } } -fn plugin_name_for_source(source_path: &Path) -> Result<String, PluginStoreError> { +pub(crate) fn plugin_version_for_source(source_path: &Path) -> Result<String, PluginStoreError> { + let plugin_version = plugin_manifest_version_for_source(source_path)? + .unwrap_or_else(|| DEFAULT_PLUGIN_VERSION.to_string()); + validate_plugin_version_segment(&plugin_version).map_err(PluginStoreError::Invalid)?; + Ok(plugin_version) +} + +fn validate_plugin_version_segment(plugin_version: &str) -> Result<(), String> { + if plugin_version.is_empty() { + return Err("invalid plugin version: must not be empty".to_string()); + } + if matches!(plugin_version, "." | "..") { + return Err("invalid plugin version: path traversal is not allowed".to_string()); + } + if !plugin_version + .chars() + .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '-' | '_' | '.' 
| '+')) + { + return Err( + "invalid plugin version: only ASCII letters, digits, `.`, `+`, `_`, and `-` are allowed" + .to_string(), + ); + } + Ok(()) +} + +fn plugin_manifest_for_source(source_path: &Path) -> Result { let manifest_path = source_path.join(PLUGIN_MANIFEST_PATH); if !manifest_path.is_file() { return Err(PluginStoreError::Invalid(format!( @@ -163,12 +192,61 @@ fn plugin_name_for_source(source_path: &Path) -> Result, +} + +fn plugin_manifest_version_for_source( + source_path: &Path, +) -> Result, PluginStoreError> { + let manifest_path = source_path.join(PLUGIN_MANIFEST_PATH); + if !manifest_path.is_file() { + return Err(PluginStoreError::Invalid(format!( + "missing plugin manifest: {}", + manifest_path.display() + ))); + } + + let contents = fs::read_to_string(&manifest_path) + .map_err(|err| PluginStoreError::io("failed to read plugin manifest", err))?; + let manifest: RawPluginManifestVersion = serde_json::from_str(&contents).map_err(|err| { + PluginStoreError::Invalid(format!( + "failed to parse plugin manifest {}: {err}", + manifest_path.display() + )) })?; + let Some(version) = manifest.version else { + return Ok(None); + }; + let Some(version) = version.as_str() else { + return Err(PluginStoreError::Invalid(format!( + "invalid plugin version in manifest {}: expected string", + manifest_path.display() + ))); + }; + let version = version.trim(); + if version.is_empty() { + return Err(PluginStoreError::Invalid(format!( + "invalid plugin version in manifest {}: must not be blank", + manifest_path.display() + ))); + } + Ok(Some(version.to_string())) +} + +fn plugin_name_for_source(source_path: &Path) -> Result { + let manifest = plugin_manifest_for_source(source_path)?; let plugin_name = manifest.name; validate_plugin_segment(&plugin_name, "plugin name") diff --git a/codex-rs/core/src/plugins/store_tests.rs b/codex-rs/core/src/plugins/store_tests.rs index e75e71c568..cb23443e80 100644 --- a/codex-rs/core/src/plugins/store_tests.rs +++ 
b/codex-rs/core/src/plugins/store_tests.rs @@ -3,19 +3,36 @@ use codex_plugin::PluginId; use pretty_assertions::assert_eq; use tempfile::tempdir; -fn write_plugin(root: &Path, dir_name: &str, manifest_name: &str) { +fn write_plugin_with_version( + root: &Path, + dir_name: &str, + manifest_name: &str, + manifest_version: Option<&str>, +) { let plugin_root = root.join(dir_name); fs::create_dir_all(plugin_root.join(".codex-plugin")).unwrap(); fs::create_dir_all(plugin_root.join("skills")).unwrap(); + let version = manifest_version + .map(|manifest_version| format!(r#","version":"{manifest_version}""#)) + .unwrap_or_default(); fs::write( plugin_root.join(".codex-plugin/plugin.json"), - format!(r#"{{"name":"{manifest_name}"}}"#), + format!(r#"{{"name":"{manifest_name}"{version}}}"#), ) .unwrap(); fs::write(plugin_root.join("skills/SKILL.md"), "skill").unwrap(); fs::write(plugin_root.join(".mcp.json"), r#"{"mcpServers":{}}"#).unwrap(); } +fn write_plugin(root: &Path, dir_name: &str, manifest_name: &str) { + write_plugin_with_version( + root, + dir_name, + manifest_name, + /*manifest_version*/ None, + ); +} + #[test] fn install_copies_plugin_into_default_marketplace() { let tmp = tempdir().unwrap(); @@ -110,6 +127,62 @@ fn install_with_version_uses_requested_cache_version() { assert!(installed_path.join(".codex-plugin/plugin.json").is_file()); } +#[test] +fn install_uses_manifest_version_when_present() { + let tmp = tempdir().unwrap(); + write_plugin_with_version( + tmp.path(), + "sample-plugin", + "sample-plugin", + Some("1.2.3-beta+7"), + ); + let plugin_id = PluginId::new("sample-plugin".to_string(), "debug".to_string()).unwrap(); + + let result = PluginStore::new(tmp.path().to_path_buf()) + .install( + AbsolutePathBuf::try_from(tmp.path().join("sample-plugin")).unwrap(), + plugin_id.clone(), + ) + .unwrap(); + + let installed_path = tmp + .path() + .join("plugins/cache/debug/sample-plugin/1.2.3-beta+7"); + assert_eq!( + result, + PluginInstallResult { + plugin_id, + 
plugin_version: "1.2.3-beta+7".to_string(), + installed_path: AbsolutePathBuf::try_from(installed_path.clone()).unwrap(), + } + ); + assert!(installed_path.join(".codex-plugin/plugin.json").is_file()); +} + +#[test] +fn install_rejects_blank_manifest_version() { + let tmp = tempdir().unwrap(); + write_plugin_with_version(tmp.path(), "sample-plugin", "sample-plugin", Some(" ")); + let plugin_id = PluginId::new("sample-plugin".to_string(), "debug".to_string()).unwrap(); + + let err = PluginStore::new(tmp.path().to_path_buf()) + .install( + AbsolutePathBuf::try_from(tmp.path().join("sample-plugin")).unwrap(), + plugin_id, + ) + .expect_err("blank manifest version should be rejected"); + let err = err.to_string().replace('\\', "/"); + + assert!( + err.starts_with("invalid plugin version in manifest "), + "unexpected error: {err}" + ); + assert!( + err.ends_with("sample-plugin/.codex-plugin/plugin.json: must not be blank"), + "unexpected error: {err}" + ); +} + #[test] fn active_plugin_version_reads_version_directory_name() { let tmp = tempdir().unwrap(); diff --git a/codex-rs/core/src/project_doc.rs b/codex-rs/core/src/project_doc.rs index 1b72f64610..e7321695a7 100644 --- a/codex-rs/core/src/project_doc.rs +++ b/codex-rs/core/src/project_doc.rs @@ -21,10 +21,12 @@ use crate::config_loader::default_project_root_markers; use crate::config_loader::merge_toml_values; use crate::config_loader::project_root_markers_from_config; use codex_app_server_protocol::ConfigLayerSource; +use codex_exec_server::Environment; +use codex_exec_server::ExecutorFileSystem; use codex_features::Feature; +use codex_utils_absolute_path::AbsolutePathBuf; use dunce::canonicalize as normalize_path; -use std::path::PathBuf; -use tokio::io::AsyncReadExt; +use std::io; use toml::Value as TomlValue; use tracing::error; @@ -76,8 +78,19 @@ fn render_js_repl_instructions(config: &Config) -> Option { /// Combines `Config::instructions` and `AGENTS.md` (if present) into a single /// string of instructions. 
-pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> { - let project_docs = read_project_docs(config).await; +pub(crate) async fn get_user_instructions( + config: &Config, + environment: Option<&Environment>, +) -> Option<String> { + let fs = environment?.get_filesystem(); + get_user_instructions_with_fs(config, fs.as_ref()).await +} + +pub(crate) async fn get_user_instructions_with_fs( + config: &Config, + fs: &dyn ExecutorFileSystem, +) -> Option<String> { + let project_docs = read_project_docs_with_fs(config, fs).await; let mut output = String::new(); @@ -125,14 +138,25 @@ pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> { /// concatenation of all discovered docs. If no documentation file is found the /// function returns `Ok(None)`. Unexpected I/O failures bubble up as `Err` so /// callers can decide how to handle them. -pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> { +pub async fn read_project_docs( + config: &Config, + environment: &Environment, +) -> io::Result<Option<String>> { + let fs = environment.get_filesystem(); + read_project_docs_with_fs(config, fs.as_ref()).await +} + +async fn read_project_docs_with_fs( + config: &Config, + fs: &dyn ExecutorFileSystem, +) -> io::Result<Option<String>> { let max_total = config.project_doc_max_bytes; if max_total == 0 { return Ok(None); } - let paths = discover_project_doc_paths(config)?; + let paths = discover_project_doc_paths(config, fs).await?; if paths.is_empty() { return Ok(None); } @@ -145,16 +169,22 @@ pub async fn read_project_docs(config: &Config) -> std::io::Result<Option<String>> { - let file = match tokio::fs::File::open(&p).await { - Ok(f) => f, - Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue, - Err(e) => return Err(e), - }; + match fs.get_metadata(&p).await { + Ok(metadata) if !metadata.is_file => continue, + Ok(_) => {} + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), + } - let size = file.metadata().await?.len(); - let mut reader = tokio::io::BufReader::new(file).take(remaining); - let mut data: Vec<u8> = 
Vec::new(); - reader.read_to_end(&mut data).await?; + let mut data = match fs.read_file(&p).await { + Ok(data) => data, + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), + }; + let size = data.len() as u64; + if size > remaining { + data.truncate(remaining as usize); + } if size > remaining { tracing::warn!( @@ -183,10 +213,17 @@ pub async fn read_project_docs(config: &Config) -> std::io::Result std::io::Result> { - let mut dir = config.cwd.to_path_buf(); +pub async fn discover_project_doc_paths( + config: &Config, + fs: &dyn ExecutorFileSystem, +) -> io::Result> { + if config.project_doc_max_bytes == 0 { + return Ok(Vec::new()); + } + + let mut dir = config.cwd.clone(); if let Ok(canon) = normalize_path(&dir) { - dir = canon; + dir = AbsolutePathBuf::try_from(canon)?; } let mut merged = TomlValue::Table(toml::map::Map::new()); @@ -211,14 +248,14 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result true, - Err(e) if e.kind() == std::io::ErrorKind::NotFound => false, - Err(e) => return Err(e), + Err(err) if err.kind() == io::ErrorKind::NotFound => false, + Err(err) => return Err(err), }; if marker_exists { - project_root = Some(ancestor.to_path_buf()); + project_root = Some(AbsolutePathBuf::try_from(ancestor.to_path_buf())?); break; } } @@ -228,11 +265,11 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result = if let Some(root) = project_root { + let search_dirs: Vec = if let Some(root) = project_root { let mut dirs = Vec::new(); - let mut cursor = dir.as_path(); + let mut cursor = dir.clone(); loop { - dirs.push(cursor.to_path_buf()); + dirs.push(cursor.clone()); if cursor == root { break; } @@ -247,29 +284,25 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result = Vec::new(); + let mut found: Vec = Vec::new(); let candidate_filenames = candidate_filenames(config); for d in search_dirs { for name in &candidate_filenames { let candidate = d.join(name); - match 
std::fs::symlink_metadata(&candidate) { - Ok(md) => { - let ft = md.file_type(); - // Allow regular files and symlinks; opening will later fail for dangling links. - if ft.is_file() || ft.is_symlink() { - found.push(candidate); - break; - } + match fs.get_metadata(&candidate).await { + Ok(md) if md.is_file => { + found.push(candidate); + break; } - Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue, - Err(e) => return Err(e), + Ok(_) => {} + Err(err) if err.kind() == io::ErrorKind::NotFound => continue, + Err(err) => return Err(err), } } } Ok(found) } - fn candidate_filenames<'a>(config: &'a Config) -> Vec<&'a str> { let mut names: Vec<&'a str> = Vec::with_capacity(2 + config.project_doc_fallback_filenames.len()); diff --git a/codex-rs/core/src/project_doc_tests.rs b/codex-rs/core/src/project_doc_tests.rs index 9f4d9c5286..c8caf2ff9a 100644 --- a/codex-rs/core/src/project_doc_tests.rs +++ b/codex-rs/core/src/project_doc_tests.rs @@ -1,12 +1,23 @@ use super::*; use crate::config::ConfigBuilder; +use codex_exec_server::LOCAL_FS; use codex_features::Feature; +use codex_utils_absolute_path::AbsolutePathBuf; use core_test_support::PathBufExt; use core_test_support::TempDirExt; +use pretty_assertions::assert_eq; use std::fs; use std::path::PathBuf; use tempfile::TempDir; +async fn get_user_instructions(config: &Config) -> Option { + super::get_user_instructions_with_fs(config, LOCAL_FS.as_ref()).await +} + +async fn discover_project_doc_paths(config: &Config) -> std::io::Result> { + super::discover_project_doc_paths(config, LOCAL_FS.as_ref()).await +} + /// Helper that returns a `Config` pointing at `root` and using `limit` as /// the maximum number of bytes to embed from AGENTS.md. 
The caller can /// optionally specify a custom `instructions` string – when `None` the @@ -85,6 +96,16 @@ async fn no_doc_file_returns_none() { assert!(res.is_none(), "Expected None when AGENTS.md is absent"); } +#[tokio::test] +async fn no_environment_returns_none() { + let tmp = tempfile::tempdir().expect("tempdir"); + let config = make_config(&tmp, /*limit*/ 4096, Some("user instructions")).await; + + let res = super::get_user_instructions(&config, /*environment*/ None).await; + + assert_eq!(res, None); +} + /// Small file within the byte-limit is returned unmodified. #[tokio::test] async fn doc_smaller_than_limit_is_returned() { @@ -161,6 +182,18 @@ async fn zero_byte_limit_disables_docs() { ); } +#[tokio::test] +async fn zero_byte_limit_disables_discovery() { + let tmp = tempfile::tempdir().expect("tempdir"); + fs::write(tmp.path().join("AGENTS.md"), "something").unwrap(); + + let discovery = + discover_project_doc_paths(&make_config(&tmp, /*limit*/ 0, /*instructions*/ None).await) + .await + .expect("discover paths"); + assert_eq!(discovery, Vec::::new()); +} + #[tokio::test] async fn js_repl_instructions_are_appended_when_enabled() { let tmp = tempfile::tempdir().expect("tempdir"); @@ -293,11 +326,17 @@ async fn project_root_markers_are_honored_for_agents_discovery() { .await; cfg.cwd = nested.abs(); - let discovery = discover_project_doc_paths(&cfg).expect("discover paths"); - let expected_parent = - dunce::canonicalize(root.path().join("AGENTS.md")).expect("canonical parent doc path"); - let expected_child = - dunce::canonicalize(cfg.cwd.as_path().join("AGENTS.md")).expect("canonical child doc path"); + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); + let expected_parent = AbsolutePathBuf::try_from( + dunce::canonicalize(root.path().join("AGENTS.md")).expect("canonical parent doc path"), + ) + .expect("absolute parent doc path"); + let expected_child = AbsolutePathBuf::try_from( + 
dunce::canonicalize(cfg.cwd.join("AGENTS.md")).expect("canonical child doc path"), + ) + .expect("absolute child doc path"); assert_eq!(discovery.len(), 2); assert_eq!(discovery[0], expected_parent); assert_eq!(discovery[1], expected_child); @@ -321,7 +360,9 @@ async fn agents_local_md_preferred() { assert_eq!(res, "local"); - let discovery = discover_project_doc_paths(&cfg).expect("discover paths"); + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); assert_eq!(discovery.len(), 1); assert_eq!( discovery[0].file_name().unwrap().to_string_lossy(), @@ -371,7 +412,9 @@ async fn agents_md_preferred_over_fallbacks() { assert_eq!(res, "primary"); - let discovery = discover_project_doc_paths(&cfg).expect("discover paths"); + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); assert_eq!(discovery.len(), 1); assert!( discovery[0] @@ -382,6 +425,73 @@ async fn agents_md_preferred_over_fallbacks() { ); } +#[tokio::test] +async fn agents_md_directory_is_ignored() { + let tmp = tempfile::tempdir().expect("tempdir"); + fs::create_dir(tmp.path().join("AGENTS.md")).unwrap(); + + let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; + + let res = get_user_instructions(&cfg).await; + assert_eq!(res, None); + + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); + assert_eq!(discovery, Vec::::new()); +} + +#[cfg(unix)] +#[tokio::test] +async fn agents_md_special_file_is_ignored() { + use std::ffi::CString; + use std::os::unix::ffi::OsStrExt; + + let tmp = tempfile::tempdir().expect("tempdir"); + let path = tmp.path().join("AGENTS.md"); + let c_path = CString::new(path.as_os_str().as_bytes()).expect("path without nul"); + // SAFETY: `c_path` is a valid, nul-terminated path and `mkfifo` does not + // retain the pointer after the call. 
+ let rc = unsafe { libc::mkfifo(c_path.as_ptr(), 0o644) }; + assert_eq!(rc, 0); + + let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; + + let res = get_user_instructions(&cfg).await; + assert_eq!(res, None); + + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); + assert_eq!(discovery, Vec::::new()); +} + +#[tokio::test] +async fn override_directory_falls_back_to_agents_md_file() { + let tmp = tempfile::tempdir().expect("tempdir"); + fs::create_dir(tmp.path().join(LOCAL_PROJECT_DOC_FILENAME)).unwrap(); + fs::write(tmp.path().join(DEFAULT_PROJECT_DOC_FILENAME), "primary").unwrap(); + + let cfg = make_config(&tmp, /*limit*/ 4096, /*instructions*/ None).await; + + let res = get_user_instructions(&cfg) + .await + .expect("AGENTS.md should be used when override is a directory"); + assert_eq!(res, "primary"); + + let discovery = discover_project_doc_paths(&cfg) + .await + .expect("discover paths"); + assert_eq!(discovery.len(), 1); + assert_eq!( + discovery[0] + .file_name() + .expect("file name") + .to_string_lossy(), + DEFAULT_PROJECT_DOC_FILENAME + ); +} + #[tokio::test] async fn skills_are_not_appended_to_project_doc() { let tmp = tempfile::tempdir().expect("tempdir"); diff --git a/codex-rs/core/src/prompt_debug.rs b/codex-rs/core/src/prompt_debug.rs index 4a8513bb49..5fe39ee88f 100644 --- a/codex-rs/core/src/prompt_debug.rs +++ b/codex-rs/core/src/prompt_debug.rs @@ -26,12 +26,8 @@ pub async fn build_prompt_input( ) -> CodexResult> { config.ephemeral = true; - let auth_manager = AuthManager::shared( - config.codex_home.clone(), - /*enable_codex_api_key_env*/ false, - config.cli_auth_credentials_store_mode, - ); - auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id.clone()); + let auth_manager = + AuthManager::shared_from_config(&config, /*enable_codex_api_key_env*/ false); let thread_manager = ThreadManager::new( &config, diff --git a/codex-rs/core/src/realtime_conversation.rs 
b/codex-rs/core/src/realtime_conversation.rs index dae8f5c5ce..16f88650a4 100644 --- a/codex-rs/core/src/realtime_conversation.rs +++ b/codex-rs/core/src/realtime_conversation.rs @@ -1,6 +1,4 @@ use crate::codex::Session; -use crate::config::RealtimeWsMode; -use crate::config::RealtimeWsVersion; use crate::realtime_context::build_realtime_startup_context; use async_channel::Receiver; use async_channel::Sender; @@ -14,10 +12,12 @@ use codex_api::RealtimeEventParser; use codex_api::RealtimeSessionConfig; use codex_api::RealtimeSessionMode; use codex_api::RealtimeWebsocketClient; -use codex_api::api_bridge::map_api_error; -use codex_api::endpoint::realtime_websocket::RealtimeWebsocketEvents; -use codex_api::endpoint::realtime_websocket::RealtimeWebsocketWriter; +use codex_api::RealtimeWebsocketEvents; +use codex_api::RealtimeWebsocketWriter; +use codex_api::map_api_error; use codex_app_server_protocol::AuthMode; +use codex_config::config_toml::RealtimeWsMode; +use codex_config::config_toml::RealtimeWsVersion; use codex_login::CodexAuth; use codex_login::default_client::default_headers; use codex_login::read_openai_api_key_from_env; diff --git a/codex-rs/core/src/rollout.rs b/codex-rs/core/src/rollout.rs index c3a7218710..492b0bc058 100644 --- a/codex-rs/core/src/rollout.rs +++ b/codex-rs/core/src/rollout.rs @@ -1,17 +1,26 @@ use crate::config::Config; pub use codex_rollout::ARCHIVED_SESSIONS_SUBDIR; +pub use codex_rollout::Cursor; +pub use codex_rollout::EventPersistenceMode; pub use codex_rollout::INTERACTIVE_SESSION_SOURCES; pub use codex_rollout::RolloutRecorder; pub use codex_rollout::RolloutRecorderParams; pub use codex_rollout::SESSIONS_SUBDIR; pub use codex_rollout::SessionMeta; +pub use codex_rollout::ThreadItem; +pub use codex_rollout::ThreadSortKey; +pub use codex_rollout::ThreadsPage; pub use codex_rollout::append_thread_name; pub use codex_rollout::find_archived_thread_path_by_id_str; #[deprecated(note = "use find_thread_path_by_id_str")] pub use 
codex_rollout::find_conversation_path_by_id_str; pub use codex_rollout::find_thread_name_by_id; +pub use codex_rollout::find_thread_names_by_ids; pub use codex_rollout::find_thread_path_by_id_str; pub use codex_rollout::find_thread_path_by_name_str; +pub use codex_rollout::parse_cursor; +pub use codex_rollout::read_head_for_summary; +pub use codex_rollout::read_session_meta_line; pub use codex_rollout::rollout_date_parts; impl codex_rollout::RolloutConfigView for Config { @@ -36,24 +45,30 @@ impl codex_rollout::RolloutConfigView for Config { } } -pub mod list { - pub use codex_rollout::list::*; +pub(crate) mod list { + pub use codex_rollout::ThreadListConfig; + pub use codex_rollout::ThreadListLayout; + pub use codex_rollout::ThreadSortKey; + pub use codex_rollout::find_thread_path_by_id_str; + pub use codex_rollout::get_threads_in_root; } pub(crate) mod metadata { - pub(crate) use codex_rollout::metadata::builder_from_items; + pub(crate) use codex_rollout::builder_from_items; } -pub mod policy { - pub use codex_rollout::policy::*; +pub(crate) mod policy { + pub use codex_rollout::EventPersistenceMode; + pub use codex_rollout::should_persist_response_item_for_memories; } -pub mod recorder { - pub use codex_rollout::recorder::*; +pub(crate) mod recorder { + pub use codex_rollout::RolloutRecorder; } -pub mod session_index { - pub use codex_rollout::session_index::*; +pub(crate) mod session_index { + pub use codex_rollout::append_thread_name; + pub use codex_rollout::find_thread_name_by_id; } pub(crate) use crate::session_rollout_init_error::map_session_init_error; diff --git a/codex-rs/core/src/safety.rs b/codex-rs/core/src/safety.rs index dec7a00876..4ed88783a6 100644 --- a/codex-rs/core/src/safety.rs +++ b/codex-rs/core/src/safety.rs @@ -12,6 +12,11 @@ use codex_protocol::protocol::SandboxPolicy; use codex_sandboxing::SandboxType; use codex_sandboxing::get_platform_sandbox; +const PATCH_REJECTED_OUTSIDE_PROJECT_REASON: &str = + "writing outside of the project; 
rejected by user approval settings"; +const PATCH_REJECTED_READ_ONLY_REASON: &str = + "writing is blocked by read-only sandbox; rejected by user approval settings"; + #[derive(Debug, PartialEq)] pub enum SafetyCheck { AutoApprove { @@ -85,9 +90,7 @@ pub fn assess_patch_safety( None => { if rejects_sandbox_approval { SafetyCheck::Reject { - reason: - "writing outside of the project; rejected by user approval settings" - .to_string(), + reason: patch_rejection_reason(sandbox_policy).to_string(), } } else { SafetyCheck::AskUser @@ -97,14 +100,22 @@ pub fn assess_patch_safety( } } else if rejects_sandbox_approval { SafetyCheck::Reject { - reason: "writing outside of the project; rejected by user approval settings" - .to_string(), + reason: patch_rejection_reason(sandbox_policy).to_string(), } } else { SafetyCheck::AskUser } } +fn patch_rejection_reason(sandbox_policy: &SandboxPolicy) -> &'static str { + match sandbox_policy { + SandboxPolicy::ReadOnly { .. } => PATCH_REJECTED_READ_ONLY_REASON, + SandboxPolicy::WorkspaceWrite { .. } + | SandboxPolicy::DangerFullAccess + | SandboxPolicy::ExternalSandbox { .. } => PATCH_REJECTED_OUTSIDE_PROJECT_REASON, + } +} + fn is_write_patch_constrained_to_writable_paths( action: &ApplyPatchAction, file_system_sandbox_policy: &FileSystemSandboxPolicy, diff --git a/codex-rs/core/src/safety_tests.rs b/codex-rs/core/src/safety_tests.rs index e7f55693d1..9ea59efd34 100644 --- a/codex-rs/core/src/safety_tests.rs +++ b/codex-rs/core/src/safety_tests.rs @@ -5,6 +5,7 @@ use codex_protocol::protocol::FileSystemSandboxEntry; use codex_protocol::protocol::FileSystemSpecialPath; use codex_protocol::protocol::GranularApprovalConfig; use codex_utils_absolute_path::AbsolutePathBuf; +use core_test_support::PathBufExt; use pretty_assertions::assert_eq; use tempfile::TempDir; @@ -17,7 +18,10 @@ fn test_writable_roots_constraint() { let parent = cwd.parent().unwrap().to_path_buf(); // Helper to build a single‑entry patch that adds a file at `p`. 
- let make_add_change = |p: PathBuf| ApplyPatchAction::new_add_for_test(&p, "".to_string()); + let make_add_change = |p: PathBuf| { + let p = p.abs(); + ApplyPatchAction::new_add_for_test(&p, "".to_string()) + }; let add_inside = make_add_change(cwd.join("inner.txt")); let add_outside = make_add_change(parent.join("outside.txt")); @@ -64,7 +68,8 @@ fn test_writable_roots_constraint() { fn external_sandbox_auto_approves_in_on_request() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); - let add_inside = ApplyPatchAction::new_add_for_test(&cwd.join("inner.txt"), "".to_string()); + let add_inside_path = cwd.join("inner.txt").abs(); + let add_inside = ApplyPatchAction::new_add_for_test(&add_inside_path, "".to_string()); let policy = SandboxPolicy::ExternalSandbox { network_access: codex_protocol::protocol::NetworkAccess::Enabled, @@ -91,8 +96,8 @@ fn granular_with_all_flags_true_matches_on_request_for_out_of_root_patch() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); let parent = cwd.parent().unwrap().to_path_buf(); - let add_outside = - ApplyPatchAction::new_add_for_test(&parent.join("outside.txt"), "".to_string()); + let outside_path = parent.join("outside.txt").abs(); + let add_outside = ApplyPatchAction::new_add_for_test(&outside_path, "".to_string()); let policy_workspace_only = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], read_only_access: Default::default(), @@ -136,8 +141,8 @@ fn granular_sandbox_approval_false_rejects_out_of_root_patch() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); let parent = cwd.parent().unwrap().to_path_buf(); - let add_outside = - ApplyPatchAction::new_add_for_test(&parent.join("outside.txt"), "".to_string()); + let outside_path = parent.join("outside.txt").abs(); + let add_outside = ApplyPatchAction::new_add_for_test(&outside_path, "".to_string()); let policy_workspace_only = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], read_only_access: 
Default::default(), @@ -162,8 +167,37 @@ fn granular_sandbox_approval_false_rejects_out_of_root_patch() { WindowsSandboxLevel::Disabled, ), SafetyCheck::Reject { - reason: "writing outside of the project; rejected by user approval settings" - .to_string(), + reason: PATCH_REJECTED_OUTSIDE_PROJECT_REASON.to_string(), + }, + ); +} + +#[test] +fn read_only_policy_rejects_patch_with_read_only_reason() { + let tmp = TempDir::new().unwrap(); + let cwd = tmp.path().to_path_buf(); + let inside_path = cwd.join("inside.txt").abs(); + let action = ApplyPatchAction::new_add_for_test(&inside_path, "".to_string()); + let sandbox_policy = SandboxPolicy::new_read_only_policy(); + let file_system_sandbox_policy = + FileSystemSandboxPolicy::from_legacy_sandbox_policy(&sandbox_policy, &cwd); + + assert!(!is_write_patch_constrained_to_writable_paths( + &action, + &file_system_sandbox_policy, + &cwd, + )); + assert_eq!( + assess_patch_safety( + &action, + AskForApproval::Never, + &sandbox_policy, + &file_system_sandbox_policy, + &cwd, + WindowsSandboxLevel::Disabled, + ), + SafetyCheck::Reject { + reason: PATCH_REJECTED_READ_ONLY_REASON.to_string(), }, ); } @@ -172,8 +206,8 @@ fn explicit_unreadable_paths_prevent_auto_approval_for_external_sandbox() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); let blocked_path = cwd.join("blocked.txt"); - let blocked_absolute = AbsolutePathBuf::from_absolute_path(blocked_path.clone()).unwrap(); - let action = ApplyPatchAction::new_add_for_test(&blocked_path, "".to_string()); + let blocked_absolute = blocked_path.abs(); + let action = ApplyPatchAction::new_add_for_test(&blocked_absolute, "".to_string()); let sandbox_policy = SandboxPolicy::ExternalSandbox { network_access: codex_protocol::protocol::NetworkAccess::Restricted, }; @@ -215,8 +249,9 @@ fn explicit_read_only_subpaths_prevent_auto_approval_for_external_sandbox() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); let blocked_path = 
cwd.join("docs").join("blocked.txt"); - let docs_absolute = AbsolutePathBuf::resolve_path_against_base("docs", &cwd).unwrap(); - let action = ApplyPatchAction::new_add_for_test(&blocked_path, "".to_string()); + let blocked_absolute = blocked_path.abs(); + let docs_absolute = AbsolutePathBuf::resolve_path_against_base("docs", &cwd); + let action = ApplyPatchAction::new_add_for_test(&blocked_absolute, "".to_string()); let sandbox_policy = SandboxPolicy::ExternalSandbox { network_access: codex_protocol::protocol::NetworkAccess::Restricted, }; @@ -257,8 +292,8 @@ fn explicit_read_only_subpaths_prevent_auto_approval_for_external_sandbox() { fn missing_project_dot_codex_config_requires_approval() { let tmp = TempDir::new().unwrap(); let cwd = tmp.path().to_path_buf(); - let action = - ApplyPatchAction::new_add_for_test(&cwd.join(".codex").join("config.toml"), "".to_string()); + let config_path = cwd.join(".codex").join("config.toml").abs(); + let action = ApplyPatchAction::new_add_for_test(&config_path, "".to_string()); let sandbox_policy = SandboxPolicy::WorkspaceWrite { writable_roots: vec![], read_only_access: Default::default(), diff --git a/codex-rs/core/src/session_startup_prewarm.rs b/codex-rs/core/src/session_startup_prewarm.rs index 397ffc949c..d3ab0e6198 100644 --- a/codex-rs/core/src/session_startup_prewarm.rs +++ b/codex-rs/core/src/session_startup_prewarm.rs @@ -13,9 +13,9 @@ use crate::codex::INITIAL_SUBMIT_ID; use crate::codex::Session; use crate::codex::build_prompt; use crate::codex::built_tools; +use codex_otel::STARTUP_PREWARM_AGE_AT_FIRST_TURN_METRIC; +use codex_otel::STARTUP_PREWARM_DURATION_METRIC; use codex_otel::SessionTelemetry; -use codex_otel::metrics::names::STARTUP_PREWARM_AGE_AT_FIRST_TURN_METRIC; -use codex_otel::metrics::names::STARTUP_PREWARM_DURATION_METRIC; use codex_protocol::error::Result as CodexResult; use codex_protocol::models::BaseInstructions; @@ -217,9 +217,9 @@ async fn schedule_startup_prewarm_inner( Vec::new(), 
startup_router.as_ref(), startup_turn_context.as_ref(), - BaseInstructions { + Some(BaseInstructions { text: base_instructions, - }, + }), ); let startup_turn_metadata_header = startup_turn_context .turn_metadata_state diff --git a/codex-rs/core/src/state/service.rs b/codex-rs/core/src/state/service.rs index e0d7741f41..e5c3a00cb2 100644 --- a/codex-rs/core/src/state/service.rs +++ b/codex-rs/core/src/state/service.rs @@ -17,7 +17,7 @@ use codex_analytics::AnalyticsEventsClient; use codex_exec_server::Environment; use codex_hooks::Hooks; use codex_login::AuthManager; -use codex_mcp::mcp_connection_manager::McpConnectionManager; +use codex_mcp::McpConnectionManager; use codex_models_manager::manager::ModelsManager; use codex_otel::SessionTelemetry; use codex_rollout::state_db::StateDbHandle; @@ -57,5 +57,5 @@ pub(crate) struct SessionServices { /// Session-scoped model client shared across turns. pub(crate) model_client: ModelClient, pub(crate) code_mode_service: CodeModeService, - pub(crate) environment: Arc, + pub(crate) environment: Option>, } diff --git a/codex-rs/core/src/state_db_bridge.rs b/codex-rs/core/src/state_db_bridge.rs index f073c498b5..c588f039d2 100644 --- a/codex-rs/core/src/state_db_bridge.rs +++ b/codex-rs/core/src/state_db_bridge.rs @@ -1,18 +1,5 @@ use codex_rollout::state_db as rollout_state_db; pub use codex_rollout::state_db::StateDbHandle; -pub use codex_rollout::state_db::apply_rollout_items; -pub use codex_rollout::state_db::find_rollout_path_by_id; -pub use codex_rollout::state_db::get_dynamic_tools; -pub use codex_rollout::state_db::list_thread_ids_db; -pub use codex_rollout::state_db::list_threads_db; -pub use codex_rollout::state_db::mark_thread_memory_mode_polluted; -pub use codex_rollout::state_db::normalize_cwd_for_state_db; -pub use codex_rollout::state_db::open_if_present; -pub use codex_rollout::state_db::persist_dynamic_tools; -pub use codex_rollout::state_db::read_repair_rollout_path; -pub use 
codex_rollout::state_db::reconcile_rollout; -pub use codex_rollout::state_db::touch_thread_updated_at; -pub use codex_state::LogEntry; use crate::config::Config; diff --git a/codex-rs/core/src/stream_events_utils.rs b/codex-rs/core/src/stream_events_utils.rs index 75eb1a8c22..ac4334d683 100644 --- a/codex-rs/core/src/stream_events_utils.rs +++ b/codex-rs/core/src/stream_events_utils.rs @@ -379,17 +379,12 @@ pub(crate) async fn handle_non_tool_response_item( .parent() .unwrap_or(turn_context.config.codex_home.as_path()); let message: ResponseItem = DeveloperInstructions::new(format!( - "Generated images are saved to {} as {} by default.", + "Generated images are saved to {} as {} by default.\nIf you need to use a generated image at another path, copy it and leave the original in place unless the user explicitly asks you to delete it.", image_output_dir.display(), image_output_path.display(), )) .into(); - let copy_message: ResponseItem = DeveloperInstructions::new( - "If you need to use a generated image at another path, copy it and leave the original in place unless the user explicitly asks you to delete it." 
- .to_string(), - ) - .into(); - sess.record_conversation_items(turn_context, &[message, copy_message]) + sess.record_conversation_items(turn_context, &[message]) .await; } Err(err) => { diff --git a/codex-rs/core/src/tasks/mod.rs b/codex-rs/core/src/tasks/mod.rs index 5272921586..5da3650bf8 100644 --- a/codex-rs/core/src/tasks/mod.rs +++ b/codex-rs/core/src/tasks/mod.rs @@ -33,10 +33,10 @@ use crate::state::TaskKind; use codex_login::AuthManager; use codex_models_manager::manager::ModelsManager; use codex_otel::SessionTelemetry; -use codex_otel::metrics::names::TURN_E2E_DURATION_METRIC; -use codex_otel::metrics::names::TURN_NETWORK_PROXY_METRIC; -use codex_otel::metrics::names::TURN_TOKEN_USAGE_METRIC; -use codex_otel::metrics::names::TURN_TOOL_CALL_METRIC; +use codex_otel::TURN_E2E_DURATION_METRIC; +use codex_otel::TURN_NETWORK_PROXY_METRIC; +use codex_otel::TURN_TOKEN_USAGE_METRIC; +use codex_otel::TURN_TOOL_CALL_METRIC; use codex_protocol::models::ContentItem; use codex_protocol::models::ResponseInputItem; use codex_protocol::models::ResponseItem; @@ -511,9 +511,15 @@ impl Session { &[("token_type", "reasoning_output"), tmp_mem], ); } + let (completed_at, duration_ms) = turn_context + .turn_timing_state + .completed_at_and_duration_ms() + .await; let event = EventMsg::TurnComplete(TurnCompleteEvent { turn_id: turn_context.sub_id.clone(), last_agent_message, + completed_at, + duration_ms, }); self.send_event(turn_context.as_ref(), event).await; @@ -588,9 +594,16 @@ impl Session { self.flush_rollout().await; } + let (completed_at, duration_ms) = task + .turn_context + .turn_timing_state + .completed_at_and_duration_ms() + .await; let event = EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some(task.turn_context.sub_id.clone()), reason, + completed_at, + duration_ms, }); self.send_event(task.turn_context.as_ref(), event).await; } diff --git a/codex-rs/core/src/tasks/mod_tests.rs b/codex-rs/core/src/tasks/mod_tests.rs index 6573da5d4c..22bf0a99c8 100644 --- 
a/codex-rs/core/src/tasks/mod_tests.rs +++ b/codex-rs/core/src/tasks/mod_tests.rs @@ -1,8 +1,8 @@ use super::emit_turn_network_proxy_metric; +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; use codex_otel::SessionTelemetry; -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::names::TURN_NETWORK_PROXY_METRIC; +use codex_otel::TURN_NETWORK_PROXY_METRIC; use codex_protocol::ThreadId; use codex_protocol::protocol::SessionSource; use opentelemetry::KeyValue; diff --git a/codex-rs/core/src/tasks/regular.rs b/codex-rs/core/src/tasks/regular.rs index f2a29ee7ab..2a26dbccad 100644 --- a/codex-rs/core/src/tasks/regular.rs +++ b/codex-rs/core/src/tasks/regular.rs @@ -46,6 +46,7 @@ impl SessionTask for RegularTask { // not wait on startup prewarm resolution. let event = EventMsg::TurnStarted(TurnStartedEvent { turn_id: ctx.sub_id.clone(), + started_at: ctx.turn_timing_state.started_at_unix_secs().await, model_context_window: ctx.model_context_window(), collaboration_mode_kind: ctx.collaboration_mode.mode, }); diff --git a/codex-rs/core/src/tasks/review.rs b/codex-rs/core/src/tasks/review.rs index a1cc071108..834cf20b4b 100644 --- a/codex-rs/core/src/tasks/review.rs +++ b/codex-rs/core/src/tasks/review.rs @@ -112,7 +112,7 @@ async fn start_review_conversation( let _ = sub_agent_config.features.disable(Feature::Collab); // Set explicit review rubric for the sub-agent - sub_agent_config.base_instructions = Some(crate::REVIEW_PROMPT.to_string()); + sub_agent_config.base_instructions = Some(Some(crate::REVIEW_PROMPT.to_string())); sub_agent_config.permissions.approval_policy = Constrained::allow_only(AskForApproval::Never); let model = config diff --git a/codex-rs/core/src/tasks/user_shell.rs b/codex-rs/core/src/tasks/user_shell.rs index bd473138b6..744d8d797c 100644 --- a/codex-rs/core/src/tasks/user_shell.rs +++ b/codex-rs/core/src/tasks/user_shell.rs @@ -111,6 +111,7 @@ pub(crate) async fn 
execute_user_shell_command( // freshly reinjected context before the summary/replacement history is applied. let event = EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_context.sub_id.clone(), + started_at: turn_context.turn_timing_state.started_at_unix_secs().await, model_context_window: turn_context.model_context_window(), collaboration_mode_kind: turn_context.collaboration_mode.mode, }); @@ -123,11 +124,16 @@ pub(crate) async fn execute_user_shell_command( let use_login_shell = true; let session_shell = session.user_shell(); let display_command = session_shell.derive_exec_args(&command, use_login_shell); + let exec_env_map = create_env( + &turn_context.shell_environment_policy, + Some(session.conversation_id), + ); let exec_command = maybe_wrap_shell_lc_with_snapshot( &display_command, session_shell.as_ref(), turn_context.cwd.as_path(), &turn_context.shell_environment_policy.r#set, + &exec_env_map, ); let call_id = Uuid::new_v4().to_string(); @@ -155,10 +161,7 @@ pub(crate) async fn execute_user_shell_command( let exec_env = ExecRequest { command: exec_command.clone(), cwd: cwd.to_path_buf(), - env: create_env( - &turn_context.shell_environment_policy, - Some(session.conversation_id), - ), + env: exec_env_map, network: turn_context.network.clone(), // TODO(zhao-oai): Now that we have ExecExpiration::Cancellation, we // should use that instead of an "arbitrarily large" timeout here. 
diff --git a/codex-rs/core/src/thread_manager.rs b/codex-rs/core/src/thread_manager.rs index f560a306b8..2dd4ad5532 100644 --- a/codex-rs/core/src/thread_manager.rs +++ b/codex-rs/core/src/thread_manager.rs @@ -43,9 +43,11 @@ use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::TurnAbortedEvent; use codex_protocol::protocol::W3cTraceContext; +use codex_state::DirectionalThreadSpawnEdgeStatus; use futures::StreamExt; use futures::stream::FuturesUnordered; use std::collections::HashMap; +use std::collections::HashSet; use std::path::PathBuf; use std::sync::Arc; use std::sync::atomic::AtomicBool; @@ -401,6 +403,53 @@ impl ThreadManager { self.state.get_thread(thread_id).await } + /// List `thread_id` plus all known descendants in its spawn subtree. + pub async fn list_agent_subtree_thread_ids( + &self, + thread_id: ThreadId, + ) -> CodexResult> { + let thread = self.state.get_thread(thread_id).await?; + + let mut subtree_thread_ids = Vec::new(); + let mut seen_thread_ids = HashSet::new(); + subtree_thread_ids.push(thread_id); + seen_thread_ids.insert(thread_id); + + if let Some(state_db_ctx) = thread.state_db() { + for status in [ + DirectionalThreadSpawnEdgeStatus::Open, + DirectionalThreadSpawnEdgeStatus::Closed, + ] { + for descendant_id in state_db_ctx + .list_thread_spawn_descendants_with_status(thread_id, status) + .await + .map_err(|err| { + CodexErr::Fatal(format!("failed to load thread-spawn descendants: {err}")) + })? + { + if seen_thread_ids.insert(descendant_id) { + subtree_thread_ids.push(descendant_id); + } + } + } + } + + for descendant_id in thread + .codex + .session + .services + .agent_control + .list_live_agent_subtree_thread_ids(thread_id) + .await? 
+ { + if seen_thread_ids.insert(descendant_id) { + subtree_thread_ids.push(descendant_id); + } + } + + Ok(subtree_thread_ids) + } + pub async fn start_thread(&self, config: Config) -> CodexResult { // Box delegated thread-spawn futures so these convenience wrappers do // not inline the full spawn path into every caller's async state. @@ -1010,6 +1059,8 @@ fn append_interrupted_boundary(history: InitialHistory, turn_id: Option) let aborted_event = RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent { turn_id, reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, })); match history { diff --git a/codex-rs/core/src/thread_manager_tests.rs b/codex-rs/core/src/thread_manager_tests.rs index 92ecd68afa..3db1735822 100644 --- a/codex-rs/core/src/thread_manager_tests.rs +++ b/codex-rs/core/src/thread_manager_tests.rs @@ -164,6 +164,7 @@ fn out_of_range_truncation_drops_pre_user_active_turn_prefix() { RolloutItem::ResponseItem(assistant_msg("a1")), RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-2".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ -320,6 +321,8 @@ fn interrupted_fork_snapshot_appends_interrupt_boundary() { RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent { turn_id: None, reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, })), ]) .expect("serialize expected interrupted fork history"), @@ -334,6 +337,8 @@ fn interrupted_fork_snapshot_appends_interrupt_boundary() { RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent { turn_id: None, reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, })), ]) .expect("serialize expected interrupted empty history"), @@ -349,6 +354,8 @@ fn interrupted_snapshot_is_not_mid_turn() { RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + 
completed_at: None, + duration_ms: None, })), ]); @@ -485,6 +492,8 @@ async fn interrupted_fork_snapshot_does_not_synthesize_turn_id_for_legacy_histor EventMsg::TurnAborted(TurnAbortedEvent { turn_id: expected_turn_id, reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), )) .expect("serialize interrupted abort event"); @@ -536,6 +545,7 @@ async fn interrupted_fork_snapshot_preserves_explicit_turn_id() { InitialHistory::Forked(vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-explicit".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: Default::default(), })), @@ -594,6 +604,8 @@ async fn interrupted_fork_snapshot_preserves_explicit_turn_id() { RolloutItem::EventMsg(EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some(turn_id), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, })) if turn_id == "turn-explicit" ) })); diff --git a/codex-rs/core/src/tools/context.rs b/codex-rs/core/src/tools/context.rs index 7c486640ee..01dd0548b4 100644 --- a/codex-rs/core/src/tools/context.rs +++ b/codex-rs/core/src/tools/context.rs @@ -364,13 +364,6 @@ impl ExecCommandToolOutput { fn response_text(&self) -> String { let mut sections = Vec::new(); - if let Some(command) = &self.session_command { - sections.push(format!( - "Command: {}", - codex_shell_command::parse_command::shlex_join(command) - )); - } - if !self.chunk_id.is_empty() { sections.push(format!("Chunk ID: {}", self.chunk_id)); } diff --git a/codex-rs/core/src/tools/context_tests.rs b/codex-rs/core/src/tools/context_tests.rs index f7641a1db5..07fcfda981 100644 --- a/codex-rs/core/src/tools/context_tests.rs +++ b/codex-rs/core/src/tools/context_tests.rs @@ -249,11 +249,7 @@ fn exec_command_tool_output_formats_truncated_response() { process_id: None, exit_code: Some(0), original_token_count: Some(10), - session_command: Some(vec![ - "/bin/zsh".to_string(), - "-lc".to_string(), - "rm -rf 
/tmp/example.sqlite".to_string(), - ]), + session_command: None, } .to_response_item("call-42", &payload); @@ -267,8 +263,7 @@ fn exec_command_tool_output_formats_truncated_response() { .expect("exec output should serialize as text"); assert_regex_match( r#"(?sx) - ^Command:\ /bin/zsh\ -lc\ 'rm\ -rf\ /tmp/example\.sqlite' - \nChunk\ ID:\ abc123 + ^Chunk\ ID:\ abc123 \nWall\ time:\ \d+\.\d{4}\ seconds \nProcess\ exited\ with\ code\ 0 \nOriginal\ token\ count:\ 10 diff --git a/codex-rs/core/src/tools/handlers/agent_jobs.rs b/codex-rs/core/src/tools/handlers/agent_jobs.rs index fe30f3d750..837b654edc 100644 --- a/codex-rs/core/src/tools/handlers/agent_jobs.rs +++ b/codex-rs/core/src/tools/handlers/agent_jobs.rs @@ -535,7 +535,7 @@ async fn build_runner_options( let max_concurrency = normalize_concurrency(requested_concurrency, turn.config.agent_max_threads); let base_instructions = session.get_base_instructions().await; - let spawn_config = build_agent_spawn_config(&base_instructions, turn.as_ref())?; + let spawn_config = build_agent_spawn_config(base_instructions.as_ref(), turn.as_ref())?; Ok(JobRunnerOptions { max_concurrency, spawn_config, diff --git a/codex-rs/core/src/tools/handlers/apply_patch.rs b/codex-rs/core/src/tools/handlers/apply_patch.rs index 00b1173aa4..0be920feb3 100644 --- a/codex-rs/core/src/tools/handlers/apply_patch.rs +++ b/codex-rs/core/src/tools/handlers/apply_patch.rs @@ -23,6 +23,7 @@ use crate::tools::runtimes::apply_patch::ApplyPatchRuntime; use crate::tools::sandboxing::ToolCtx; use codex_apply_patch::ApplyPatchAction; use codex_apply_patch::ApplyPatchFileChange; +use codex_exec_server::ExecutorFileSystem; use codex_protocol::models::FileSystemPermissions; use codex_protocol::models::PermissionProfile; use codex_sandboxing::policy_transforms::effective_file_system_sandbox_policy; @@ -37,7 +38,7 @@ pub struct ApplyPatchHandler; fn file_paths_for_action(action: &ApplyPatchAction) -> Vec { let mut keys = Vec::new(); - let cwd = 
action.cwd.as_path(); + let cwd = &action.cwd; for (path, change) in action.changes() { if let Some(key) = to_abs_path(cwd, path) { @@ -55,14 +56,14 @@ fn file_paths_for_action(action: &ApplyPatchAction) -> Vec { keys } -fn to_abs_path(cwd: &Path, path: &Path) -> Option { - AbsolutePathBuf::resolve_path_against_base(path, cwd).ok() +fn to_abs_path(cwd: &AbsolutePathBuf, path: &Path) -> Option { + Some(AbsolutePathBuf::resolve_path_against_base(path, cwd)) } fn write_permissions_for_paths( file_paths: &[AbsolutePathBuf], file_system_sandbox_policy: &codex_protocol::permissions::FileSystemSandboxPolicy, - cwd: &Path, + cwd: &AbsolutePathBuf, ) -> Option { let write_paths = file_paths .iter() @@ -71,7 +72,9 @@ fn write_permissions_for_paths( .unwrap_or_else(|| path.clone()) .into_path_buf() }) - .filter(|path| !file_system_sandbox_policy.can_write_path_with_cwd(path.as_path(), cwd)) + .filter(|path| { + !file_system_sandbox_policy.can_write_path_with_cwd(path.as_path(), cwd.as_path()) + }) .collect::>() .into_iter() .map(AbsolutePathBuf::from_absolute_path) @@ -110,7 +113,7 @@ async fn effective_patch_permissions( let effective_additional_permissions = apply_granted_turn_permissions( session, crate::sandboxing::SandboxPermissions::UseDefault, - write_permissions_for_paths(&file_paths, &file_system_sandbox_policy, turn.cwd.as_path()), + write_permissions_for_paths(&file_paths, &file_system_sandbox_policy, &turn.cwd), ) .await; @@ -167,7 +170,14 @@ impl ToolHandler for ApplyPatchHandler { // Avoid building temporary ExecParams/command vectors; derive directly from inputs. 
let cwd = turn.cwd.clone(); let command = vec!["apply_patch".to_string(), patch_input.clone()]; - match codex_apply_patch::maybe_parse_apply_patch_verified(&command, &cwd) { + let Some(environment) = turn.environment.as_ref() else { + return Err(FunctionCallError::RespondToModel( + "apply_patch is unavailable in this session".to_string(), + )); + }; + let fs = environment.get_filesystem(); + match codex_apply_patch::maybe_parse_apply_patch_verified(&command, &cwd, fs.as_ref()).await + { codex_apply_patch::MaybeApplyPatchVerified::Body(changes) => { let (file_paths, effective_additional_permissions, file_system_sandbox_policy) = effective_patch_permissions(session.as_ref(), turn.as_ref(), &changes).await; @@ -254,7 +264,8 @@ impl ToolHandler for ApplyPatchHandler { #[allow(clippy::too_many_arguments)] pub(crate) async fn intercept_apply_patch( command: &[String], - cwd: &Path, + cwd: &AbsolutePathBuf, + fs: &dyn ExecutorFileSystem, timeout_ms: Option, session: Arc, turn: Arc, @@ -262,7 +273,7 @@ pub(crate) async fn intercept_apply_patch( call_id: &str, tool_name: &str, ) -> Result, FunctionCallError> { - match codex_apply_patch::maybe_parse_apply_patch_verified(command, cwd) { + match codex_apply_patch::maybe_parse_apply_patch_verified(command, cwd, fs).await { codex_apply_patch::MaybeApplyPatchVerified::Body(changes) => { session .record_model_warning( diff --git a/codex-rs/core/src/tools/handlers/apply_patch_tests.rs b/codex-rs/core/src/tools/handlers/apply_patch_tests.rs index 86ae586929..afe2e09db4 100644 --- a/codex-rs/core/src/tools/handlers/apply_patch_tests.rs +++ b/codex-rs/core/src/tools/handlers/apply_patch_tests.rs @@ -1,17 +1,21 @@ use super::*; use codex_apply_patch::MaybeApplyPatchVerified; +use codex_exec_server::LOCAL_FS; use codex_protocol::permissions::FileSystemSandboxPolicy; use codex_protocol::protocol::SandboxPolicy; +use core_test_support::PathBufExt; +use core_test_support::PathExt; use pretty_assertions::assert_eq; use tempfile::TempDir; 
-#[test] -fn approval_keys_include_move_destination() { +#[tokio::test] +async fn approval_keys_include_move_destination() { let tmp = TempDir::new().expect("tmp"); - let cwd = tmp.path(); - std::fs::create_dir_all(cwd.join("old")).expect("create old dir"); - std::fs::create_dir_all(cwd.join("renamed/dir")).expect("create dest dir"); - std::fs::write(cwd.join("old/name.txt"), "old content\n").expect("write old file"); + let cwd_path = tmp.path(); + let cwd = cwd_path.abs(); + std::fs::create_dir_all(cwd_path.join("old")).expect("create old dir"); + std::fs::create_dir_all(cwd_path.join("renamed/dir")).expect("create dest dir"); + std::fs::write(cwd_path.join("old/name.txt"), "old content\n").expect("write old file"); let patch = r#"*** Begin Patch *** Update File: old/name.txt *** Move to: renamed/dir/name.txt @@ -20,10 +24,13 @@ fn approval_keys_include_move_destination() { +new content *** End Patch"#; let argv = vec!["apply_patch".to_string(), patch.to_string()]; - let action = match codex_apply_patch::maybe_parse_apply_patch_verified(&argv, cwd) { - MaybeApplyPatchVerified::Body(action) => action, - other => panic!("expected patch body, got: {other:?}"), - }; + let action = + match codex_apply_patch::maybe_parse_apply_patch_verified(&argv, &cwd, LOCAL_FS.as_ref()) + .await + { + MaybeApplyPatchVerified::Body(action) => action, + other => panic!("expected patch body, got: {other:?}"), + }; let keys = file_paths_for_action(&action); assert_eq!(keys.len(), 2); @@ -32,8 +39,9 @@ fn approval_keys_include_move_destination() { #[test] fn write_permissions_for_paths_skip_dirs_already_writable_under_workspace_root() { let tmp = TempDir::new().expect("tmp"); - let cwd = tmp.path(); - let nested = cwd.join("nested"); + let cwd_path = tmp.path(); + let cwd = cwd_path.abs(); + let nested = cwd_path.join("nested"); std::fs::create_dir_all(&nested).expect("create nested dir"); let file_path = AbsolutePathBuf::try_from(nested.join("file.txt")) .expect("nested file path should 
be absolute"); @@ -45,7 +53,7 @@ fn write_permissions_for_paths_skip_dirs_already_writable_under_workspace_root() exclude_slash_tmp: false, }); - let permissions = write_permissions_for_paths(&[file_path], &sandbox_policy, cwd); + let permissions = write_permissions_for_paths(&[file_path], &sandbox_policy, &cwd); assert_eq!(permissions, None); } @@ -59,6 +67,7 @@ fn write_permissions_for_paths_keep_dirs_outside_workspace_root() { std::fs::create_dir_all(&outside).expect("create outside dir"); let file_path = AbsolutePathBuf::try_from(outside.join("file.txt")) .expect("outside file path should be absolute"); + let cwd_abs = cwd.abs(); let sandbox_policy = FileSystemSandboxPolicy::from(&SandboxPolicy::WorkspaceWrite { writable_roots: vec![], read_only_access: Default::default(), @@ -67,11 +76,9 @@ fn write_permissions_for_paths_keep_dirs_outside_workspace_root() { exclude_slash_tmp: true, }); - let permissions = write_permissions_for_paths(&[file_path], &sandbox_policy, &cwd); - let expected_outside = AbsolutePathBuf::from_absolute_path(dunce::simplified( - &outside.canonicalize().expect("canonicalize outside dir"), - )) - .expect("outside dir should be absolute"); + let permissions = write_permissions_for_paths(&[file_path], &sandbox_policy, &cwd_abs); + let expected_outside = + dunce::simplified(&outside.canonicalize().expect("canonicalize outside dir")).abs(); assert_eq!( permissions.and_then(|profile| profile.file_system.and_then(|fs| fs.write)), diff --git a/codex-rs/core/src/tools/handlers/mod.rs b/codex-rs/core/src/tools/handlers/mod.rs index f0a62b8c14..ec6fc02cfa 100644 --- a/codex-rs/core/src/tools/handlers/mod.rs +++ b/codex-rs/core/src/tools/handlers/mod.rs @@ -1,5 +1,5 @@ pub(crate) mod agent_jobs; -pub mod apply_patch; +pub(crate) mod apply_patch; mod dynamic; mod js_repl; mod list_dir; diff --git a/codex-rs/core/src/tools/handlers/multi_agents.rs b/codex-rs/core/src/tools/handlers/multi_agents.rs index df66543da6..984e09a968 100644 --- 
a/codex-rs/core/src/tools/handlers/multi_agents.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents.rs @@ -63,7 +63,7 @@ pub(crate) use send_input::Handler as SendInputHandler; pub(crate) use spawn::Handler as SpawnAgentHandler; pub(crate) use wait::Handler as WaitAgentHandler; -pub mod close_agent; +pub(crate) mod close_agent; mod resume_agent; mod send_input; mod spawn; diff --git a/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs b/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs index 8e4bfb5b59..a2482ea6a9 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents/spawn.rs @@ -59,8 +59,10 @@ impl ToolHandler for Handler { .into(), ) .await; - let mut config = - build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?; + let mut config = build_agent_spawn_config( + session.get_base_instructions().await.as_ref(), + turn.as_ref(), + )?; apply_requested_spawn_agent_model_overrides( &session, turn.as_ref(), diff --git a/codex-rs/core/src/tools/handlers/multi_agents_common.rs b/codex-rs/core/src/tools/handlers/multi_agents_common.rs index 2078c229b9..4205c4bb43 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_common.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_common.rs @@ -201,11 +201,12 @@ pub(crate) fn parse_collab_input( /// skipping this helper and cloning stale config state directly can send the child agent out with /// the wrong provider or runtime policy. 
pub(crate) fn build_agent_spawn_config( - base_instructions: &BaseInstructions, + base_instructions: Option<&BaseInstructions>, turn: &TurnContext, ) -> Result { let mut config = build_agent_shared_config(turn)?; - config.base_instructions = Some(base_instructions.text.clone()); + config.base_instructions = + Some(base_instructions.map(|base_instructions| base_instructions.text.clone())); Ok(config) } diff --git a/codex-rs/core/src/tools/handlers/multi_agents_tests.rs b/codex-rs/core/src/tools/handlers/multi_agents_tests.rs index 8250d84f31..5f09c248a4 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_tests.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_tests.rs @@ -1,4 +1,5 @@ use super::*; +use crate::CodexThread; use crate::ThreadManager; use crate::codex::make_session_and_context; use crate::config::DEFAULT_AGENT_MAX_DEPTH; @@ -112,6 +113,74 @@ fn history_contains_inter_agent_communication( }) } +async fn wait_for_turn_aborted( + thread: &Arc, + expected_turn_id: &str, + expected_reason: TurnAbortReason, +) { + timeout(Duration::from_secs(5), async { + loop { + let event = thread + .next_event() + .await + .expect("child thread should emit events"); + if matches!( + event.msg, + EventMsg::TurnAborted(TurnAbortedEvent { + turn_id: Some(ref turn_id), + ref reason, + .. + }) if turn_id == expected_turn_id && *reason == expected_reason + ) { + break; + } + } + }) + .await + .expect("expected child turn to be interrupted"); +} + +async fn wait_for_redirected_envelope_in_history( + thread: &Arc, + expected: &InterAgentCommunication, +) { + timeout(Duration::from_secs(5), async { + loop { + let history_items = thread + .codex + .session + .clone_history() + .await + .raw_items() + .to_vec(); + let saw_envelope = + history_contains_inter_agent_communication(&history_items, expected); + let saw_user_message = history_items.iter().any(|item| { + matches!( + item, + ResponseItem::Message { role, content, .. 
} + if role == "user" + && content.iter().any(|content_item| matches!( + content_item, + ContentItem::InputText { text } + if text == &expected.content + )) + ) + }); + if saw_envelope { + assert!( + !saw_user_message, + "redirected followup should be stored as an assistant envelope, not a plain user message" + ); + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("redirected followup envelope should appear in history"); +} + #[derive(Clone, Copy)] struct NeverEndingTask; @@ -757,7 +826,7 @@ async fn multi_agent_v2_followup_task_rejects_root_target_from_child() { agent_role: None, }); - let err = FollowupTaskHandlerV2 + let Err(err) = FollowupTaskHandlerV2 .handle(invocation( Arc::new(session), Arc::new(turn), @@ -769,7 +838,9 @@ async fn multi_agent_v2_followup_task_rejects_root_target_from_child() { })), )) .await - .expect_err("followup_task should reject the root target"); + else { + panic!("followup_task should reject the root target"); + }; assert_eq!( err, @@ -837,6 +908,8 @@ async fn multi_agent_v2_list_agents_returns_completed_status_and_last_task_messa EventMsg::TurnComplete(TurnCompleteEvent { turn_id: child_turn.sub_id.clone(), last_agent_message: Some("done".to_string()), + completed_at: None, + duration_ms: None, }), ) .await; @@ -1199,6 +1272,7 @@ async fn multi_agent_v2_followup_task_interrupts_busy_child_without_losing_messa .expect("worker thread should exist"); let active_turn = thread.codex.session.new_default_turn().await; + let interrupted_turn_id = active_turn.sub_id.clone(); thread .codex .session @@ -1243,44 +1317,18 @@ async fn multi_agent_v2_followup_task_interrupts_busy_child_without_losing_messa ) })); - timeout(Duration::from_secs(5), async { - loop { - let history_items = thread - .codex - .session - .clone_history() - .await - .raw_items() - .to_vec(); - let saw_envelope = history_contains_inter_agent_communication( - &history_items, - &InterAgentCommunication::new( - AgentPath::root(), - 
AgentPath::try_from("/root/worker").expect("agent path"), - Vec::new(), - "continue".to_string(), - /*trigger_turn*/ true, - ), - ); - let saw_user_message = history_items.iter().any(|item| { - matches!( - item, - ResponseItem::Message { role, content, .. } - if role == "user" - && content.iter().any(|content_item| matches!( - content_item, - ContentItem::InputText { text } if text == "continue" - )) - ) - }); - if saw_envelope && !saw_user_message { - break; - } - tokio::time::sleep(Duration::from_millis(10)).await; - } - }) - .await - .expect("interrupting v2 followup_task should preserve the redirected message"); + wait_for_turn_aborted(&thread, &interrupted_turn_id, TurnAbortReason::Interrupted).await; + wait_for_redirected_envelope_in_history( + &thread, + &InterAgentCommunication::new( + AgentPath::root(), + AgentPath::try_from("/root/worker").expect("agent path"), + Vec::new(), + "continue".to_string(), + /*trigger_turn*/ true, + ), + ) + .await; let _ = thread .submit(Op::Shutdown {}) @@ -1337,6 +1385,8 @@ async fn multi_agent_v2_followup_task_completion_notifies_parent_on_every_turn() EventMsg::TurnComplete(TurnCompleteEvent { turn_id: first_turn.sub_id.clone(), last_agent_message: Some("first done".to_string()), + completed_at: None, + duration_ms: None, }), ) .await; @@ -1363,6 +1413,8 @@ async fn multi_agent_v2_followup_task_completion_notifies_parent_on_every_turn() EventMsg::TurnComplete(TurnCompleteEvent { turn_id: second_turn.sub_id.clone(), last_agent_message: Some("second done".to_string()), + completed_at: None, + duration_ms: None, }), ) .await; @@ -1518,6 +1570,8 @@ async fn multi_agent_v2_interrupted_turn_does_not_notify_parent() { EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some(aborted_turn.sub_id.clone()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), ) .await; @@ -1546,7 +1600,7 @@ async fn multi_agent_v2_interrupted_turn_does_not_notify_parent() { } #[tokio::test] -async fn 
multi_agent_v2_spawn_includes_agent_id_key_when_named() { +async fn multi_agent_v2_spawn_omits_agent_id_when_named() { let (mut session, mut turn) = make_session_and_context().await; let manager = thread_manager(); let root = manager @@ -1578,7 +1632,7 @@ async fn multi_agent_v2_spawn_includes_agent_id_key_when_named() { let result: serde_json::Value = serde_json::from_str(&content).expect("spawn_agent result should be json"); - assert_eq!(result["agent_id"], serde_json::Value::Null); + assert!(result.get("agent_id").is_none()); assert_eq!(result["task_name"], "/root/test_process"); assert!(result.get("nickname").is_some()); assert_eq!(success, Some(true)); @@ -3201,9 +3255,9 @@ async fn build_agent_spawn_config_uses_turn_context_values() { .set(AskForApproval::OnRequest) .expect("approval policy set"); - let config = build_agent_spawn_config(&base_instructions, &turn).expect("spawn config"); + let config = build_agent_spawn_config(Some(&base_instructions), &turn).expect("spawn config"); let mut expected = (*turn.config).clone(); - expected.base_instructions = Some(base_instructions.text); + expected.base_instructions = Some(Some(base_instructions.text)); expected.model = Some(turn.model_info.slug.clone()); expected.model_provider = turn.provider.clone(); expected.model_reasoning_effort = turn.reasoning_effort; @@ -3239,7 +3293,7 @@ async fn build_agent_spawn_config_preserves_base_user_instructions() { text: "base".to_string(), }; - let config = build_agent_spawn_config(&base_instructions, &turn).expect("spawn config"); + let config = build_agent_spawn_config(Some(&base_instructions), &turn).expect("spawn config"); assert_eq!(config.user_instructions, base_config.user_instructions); } @@ -3248,7 +3302,7 @@ async fn build_agent_spawn_config_preserves_base_user_instructions() { async fn build_agent_resume_config_clears_base_instructions() { let (_session, mut turn) = make_session_and_context().await; let mut base_config = (*turn.config).clone(); - 
base_config.base_instructions = Some("caller-base".to_string()); + base_config.base_instructions = Some(Some("caller-base".to_string())); turn.config = Arc::new(base_config); turn.approval_policy .set(AskForApproval::OnRequest) diff --git a/codex-rs/core/src/tools/handlers/multi_agents_v2/followup_task.rs b/codex-rs/core/src/tools/handlers/multi_agents_v2/followup_task.rs index 312e17a316..e618f5da2b 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_v2/followup_task.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_v2/followup_task.rs @@ -1,13 +1,13 @@ use super::message_tool::FollowupTaskArgs; use super::message_tool::MessageDeliveryMode; -use super::message_tool::MessageToolResult; use super::message_tool::handle_message_string_tool; use super::*; +use crate::tools::context::FunctionToolOutput; pub(crate) struct Handler; impl ToolHandler for Handler { - type Output = MessageToolResult; + type Output = FunctionToolOutput; fn kind(&self) -> ToolKind { ToolKind::Function diff --git a/codex-rs/core/src/tools/handlers/multi_agents_v2/message_tool.rs b/codex-rs/core/src/tools/handlers/multi_agents_v2/message_tool.rs index 3123e93f4c..c9c2fda51e 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_v2/message_tool.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_v2/message_tool.rs @@ -4,6 +4,7 @@ //! resulting `InterAgentCommunication` should wake the target immediately. use super::*; +use crate::tools::context::FunctionToolOutput; use codex_protocol::protocol::InterAgentCommunication; #[derive(Clone, Copy, PartialEq, Eq)] @@ -46,30 +47,6 @@ pub(crate) struct FollowupTaskArgs { pub(crate) interrupt: bool, } -#[derive(Debug, Serialize)] -/// Tool result shared by the MultiAgentV2 message-delivery tools. 
-pub(crate) struct MessageToolResult { - submission_id: String, -} - -impl ToolOutput for MessageToolResult { - fn log_preview(&self) -> String { - tool_output_json_text(self, "multi_agent_message") - } - - fn success_for_logging(&self) -> bool { - true - } - - fn to_response_item(&self, call_id: &str, payload: &ToolPayload) -> ResponseInputItem { - tool_output_response_item(call_id, payload, self, Some(true), "multi_agent_message") - } - - fn code_mode_result(&self, _payload: &ToolPayload) -> JsonValue { - tool_output_code_mode_result(self, "multi_agent_message") - } -} - fn message_content(message: String) -> Result { if message.trim().is_empty() { return Err(FunctionCallError::RespondToModel( @@ -86,7 +63,7 @@ pub(crate) async fn handle_message_string_tool( target: String, message: String, interrupt: bool, -) -> Result { +) -> Result { handle_message_submission( invocation, mode, @@ -103,15 +80,13 @@ async fn handle_message_submission( target: String, prompt: String, interrupt: bool, -) -> Result { +) -> Result { let ToolInvocation { session, turn, - payload, call_id, .. 
} = invocation; - let _ = payload; let receiver_thread_id = resolve_agent_target(&session, &turn, &target).await?; let receiver_agent = session .services @@ -186,7 +161,7 @@ async fn handle_message_submission( .into(), ) .await; - let submission_id = result?; + result?; - Ok(MessageToolResult { submission_id }) + Ok(FunctionToolOutput::from_text(String::new(), Some(true))) } diff --git a/codex-rs/core/src/tools/handlers/multi_agents_v2/send_message.rs b/codex-rs/core/src/tools/handlers/multi_agents_v2/send_message.rs index a490d1100d..0d142623a9 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_v2/send_message.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_v2/send_message.rs @@ -1,13 +1,13 @@ use super::message_tool::MessageDeliveryMode; -use super::message_tool::MessageToolResult; use super::message_tool::SendMessageArgs; use super::message_tool::handle_message_string_tool; use super::*; +use crate::tools::context::FunctionToolOutput; pub(crate) struct Handler; impl ToolHandler for Handler { - type Output = MessageToolResult; + type Output = FunctionToolOutput; fn kind(&self) -> ToolKind { ToolKind::Function diff --git a/codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs b/codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs index 77c48af9e6..0d460b1a04 100644 --- a/codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs +++ b/codex-rs/core/src/tools/handlers/multi_agents_v2/spawn.rs @@ -5,6 +5,7 @@ use crate::agent::control::render_input_preview; use crate::agent::next_thread_spawn_depth; use crate::agent::role::DEFAULT_ROLE_NAME; use crate::agent::role::apply_role_to_config; +use codex_features::Feature; use codex_protocol::AgentPath; use codex_protocol::models::DeveloperInstructions; use codex_protocol::protocol::InterAgentCommunication; @@ -68,8 +69,10 @@ impl ToolHandler for Handler { .into(), ) .await; - let mut config = - build_agent_spawn_config(&session.get_base_instructions().await, turn.as_ref())?; + let mut config = 
build_agent_spawn_config( + session.get_base_instructions().await.as_ref(), + turn.as_ref(), + )?; apply_requested_spawn_agent_model_overrides( &session, turn.as_ref(), @@ -206,11 +209,18 @@ impl ToolHandler for Handler { ) })?; - Ok(SpawnAgentResult { - agent_id: None, - task_name, - nickname, - }) + let hide_agent_metadata = turn + .config + .features + .enabled(Feature::DebugHideSpawnAgentMetadata); + if hide_agent_metadata { + Ok(SpawnAgentResult::HiddenMetadata { task_name }) + } else { + Ok(SpawnAgentResult::WithNickname { + task_name, + nickname, + }) + } } } @@ -266,10 +276,15 @@ impl SpawnAgentArgs { } #[derive(Debug, Serialize)] -pub(crate) struct SpawnAgentResult { - agent_id: Option, - task_name: String, - nickname: Option, +#[serde(untagged)] +pub(crate) enum SpawnAgentResult { + WithNickname { + task_name: String, + nickname: Option, + }, + HiddenMetadata { + task_name: String, + }, } impl ToolOutput for SpawnAgentResult { diff --git a/codex-rs/core/src/tools/handlers/request_user_input.rs b/codex-rs/core/src/tools/handlers/request_user_input.rs index dcc9445a94..69d222aa87 100644 --- a/codex-rs/core/src/tools/handlers/request_user_input.rs +++ b/codex-rs/core/src/tools/handlers/request_user_input.rs @@ -5,6 +5,7 @@ use crate::tools::context::ToolPayload; use crate::tools::handlers::parse_arguments; use crate::tools::registry::ToolHandler; use crate::tools::registry::ToolKind; +use codex_protocol::protocol::SessionSource; use codex_protocol::request_user_input::RequestUserInputArgs; use codex_tools::REQUEST_USER_INPUT_TOOL_NAME; use codex_tools::normalize_request_user_input_args; @@ -39,6 +40,12 @@ impl ToolHandler for RequestUserInputHandler { } }; + if matches!(turn.session_source, SessionSource::SubAgent(_)) { + return Err(FunctionCallError::RespondToModel( + "request_user_input can only be used by the root thread".to_string(), + )); + } + let mode = session.collaboration_mode().await.mode; if let Some(message) = 
request_user_input_unavailable_message(mode, self.default_mode_request_user_input) @@ -67,3 +74,7 @@ impl ToolHandler for RequestUserInputHandler { Ok(FunctionToolOutput::from_text(content, Some(true))) } } + +#[cfg(test)] +#[path = "request_user_input_tests.rs"] +mod tests; diff --git a/codex-rs/core/src/tools/handlers/request_user_input_tests.rs b/codex-rs/core/src/tools/handlers/request_user_input_tests.rs new file mode 100644 index 0000000000..0275a12860 --- /dev/null +++ b/codex-rs/core/src/tools/handlers/request_user_input_tests.rs @@ -0,0 +1,66 @@ +use super::*; +use crate::codex::make_session_and_context; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolPayload; +use crate::turn_diff_tracker::TurnDiffTracker; +use codex_protocol::ThreadId; +use codex_protocol::protocol::SubAgentSource; +use pretty_assertions::assert_eq; +use serde_json::json; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[tokio::test] +async fn multi_agent_v2_request_user_input_rejects_subagent_threads() { + let (session, mut turn) = make_session_and_context().await; + turn.session_source = SessionSource::SubAgent(SubAgentSource::ThreadSpawn { + parent_thread_id: ThreadId::new(), + depth: 1, + agent_path: None, + agent_nickname: None, + agent_role: None, + }); + + let result = RequestUserInputHandler { + default_mode_request_user_input: true, + } + .handle(ToolInvocation { + session: Arc::new(session), + turn: Arc::new(turn), + tracker: Arc::new(Mutex::new(TurnDiffTracker::default())), + call_id: "call-1".to_string(), + tool_name: REQUEST_USER_INPUT_TOOL_NAME.to_string(), + tool_namespace: None, + payload: ToolPayload::Function { + arguments: json!({ + "questions": [{ + "header": "Hdr", + "question": "Pick one", + "id": "pick_one", + "options": [ + { + "label": "A", + "description": "A" + }, + { + "label": "B", + "description": "B" + } + ] + }] + }) + .to_string(), + }, + }) + .await; + + let Err(err) = result else { + panic!("sub-agent request_user_input 
should fail"); + }; + assert_eq!( + err, + FunctionCallError::RespondToModel( + "request_user_input can only be used by the root thread".to_string(), + ) + ); +} diff --git a/codex-rs/core/src/tools/handlers/shell.rs b/codex-rs/core/src/tools/handlers/shell.rs index 6f662ff493..e835566247 100644 --- a/codex-rs/core/src/tools/handlers/shell.rs +++ b/codex-rs/core/src/tools/handlers/shell.rs @@ -39,6 +39,7 @@ use codex_protocol::models::PermissionProfile; use codex_protocol::protocol::ExecCommandSource; use codex_shell_command::is_safe_command::is_known_safe_command; use codex_tools::ShellCommandBackendConfig; +use codex_utils_absolute_path::AbsolutePathBuf; pub struct ShellHandler; @@ -395,6 +396,13 @@ impl ShellHandler { } = args; let mut exec_params = exec_params; + let Some(environment) = turn.environment.as_ref() else { + return Err(FunctionCallError::RespondToModel( + "shell is unavailable in this session".to_string(), + )); + }; + let fs = environment.get_filesystem(); + let dependency_env = session.dependency_env().await; if !dependency_env.is_empty() { exec_params.env.extend(dependency_env.clone()); @@ -458,9 +466,16 @@ impl ShellHandler { } // Intercept apply_patch if present. 
+ let apply_patch_cwd = + AbsolutePathBuf::from_absolute_path(&exec_params.cwd).map_err(|err| { + FunctionCallError::RespondToModel(format!( + "apply_patch verification failed: failed to resolve cwd: {err}" + )) + })?; if let Some(output) = intercept_apply_patch( &exec_params.command, - &exec_params.cwd, + &apply_patch_cwd, + fs.as_ref(), exec_params.expiration.timeout_ms(), session.clone(), turn.clone(), diff --git a/codex-rs/core/src/tools/handlers/tool_search.rs b/codex-rs/core/src/tools/handlers/tool_search.rs index dd203e93b5..d1033c6135 100644 --- a/codex-rs/core/src/tools/handlers/tool_search.rs +++ b/codex-rs/core/src/tools/handlers/tool_search.rs @@ -7,7 +7,7 @@ use crate::tools::registry::ToolKind; use bm25::Document; use bm25::Language; use bm25::SearchEngineBuilder; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::ToolInfo; use codex_tools::TOOL_SEARCH_DEFAULT_LIMIT; use codex_tools::TOOL_SEARCH_TOOL_NAME; use codex_tools::ToolSearchResultSource; diff --git a/codex-rs/core/src/tools/handlers/tool_suggest.rs b/codex-rs/core/src/tools/handlers/tool_suggest.rs index 380aebd51b..9975f5131f 100644 --- a/codex-rs/core/src/tools/handlers/tool_suggest.rs +++ b/codex-rs/core/src/tools/handlers/tool_suggest.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use codex_app_server_protocol::AppInfo; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_rmcp_client::ElicitationAction; use codex_tools::DiscoverableTool; use codex_tools::DiscoverableToolAction; diff --git a/codex-rs/core/src/tools/handlers/unified_exec.rs b/codex-rs/core/src/tools/handlers/unified_exec.rs index dfe8c42752..b83068c66c 100644 --- a/codex-rs/core/src/tools/handlers/unified_exec.rs +++ b/codex-rs/core/src/tools/handlers/unified_exec.rs @@ -24,12 +24,13 @@ use crate::unified_exec::UnifiedExecProcessManager; use crate::unified_exec::WriteStdinRequest; use codex_features::Feature; use codex_otel::SessionTelemetry; -use 
codex_otel::metrics::names::TOOL_CALL_UNIFIED_EXEC_METRIC; +use codex_otel::TOOL_CALL_UNIFIED_EXEC_METRIC; use codex_protocol::models::PermissionProfile; use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::TerminalInteractionEvent; use codex_shell_command::is_safe_command::is_known_safe_command; use codex_tools::UnifiedExecShellMode; +use codex_utils_absolute_path::AbsolutePathBuf; use serde::Deserialize; use std::path::PathBuf; use std::sync::Arc; @@ -176,6 +177,13 @@ impl ToolHandler for UnifiedExecHandler { } }; + let Some(environment) = turn.environment.as_ref() else { + return Err(FunctionCallError::RespondToModel( + "unified exec is unavailable in this session".to_string(), + )); + }; + let fs = environment.get_filesystem(); + let manager: &UnifiedExecProcessManager = &session.services.unified_exec_manager; let context = UnifiedExecContext::new(session.clone(), turn.clone(), call_id.clone()); @@ -274,9 +282,19 @@ impl ToolHandler for UnifiedExecHandler { } }; + let apply_patch_cwd = match AbsolutePathBuf::from_absolute_path(&cwd) { + Ok(cwd) => cwd, + Err(err) => { + manager.release_process_id(process_id).await; + return Err(FunctionCallError::RespondToModel(format!( + "apply_patch verification failed: failed to resolve cwd: {err}" + ))); + } + }; if let Some(output) = intercept_apply_patch( &command, - &cwd, + &apply_patch_cwd, + fs.as_ref(), Some(yield_time_ms), context.session.clone(), context.turn.clone(), diff --git a/codex-rs/core/src/tools/handlers/view_image.rs b/codex-rs/core/src/tools/handlers/view_image.rs index ec34fafb36..5fa005bac8 100644 --- a/codex-rs/core/src/tools/handlers/view_image.rs +++ b/codex-rs/core/src/tools/handlers/view_image.rs @@ -91,9 +91,13 @@ impl ToolHandler for ViewImageHandler { AbsolutePathBuf::try_from(turn.resolve_path(Some(args.path))).map_err(|error| { FunctionCallError::RespondToModel(format!("unable to resolve image path: {error}")) })?; + let Some(environment) = turn.environment.as_ref() else { + 
return Err(FunctionCallError::RespondToModel( + "view_image is unavailable in this session".to_string(), + )); + }; - let metadata = turn - .environment + let metadata = environment .get_filesystem() .get_metadata(&abs_path) .await @@ -110,8 +114,7 @@ impl ToolHandler for ViewImageHandler { abs_path.display() ))); } - let file_bytes = turn - .environment + let file_bytes = environment .get_filesystem() .read_file(&abs_path) .await diff --git a/codex-rs/core/src/tools/js_repl/mod_tests.rs b/codex-rs/core/src/tools/js_repl/mod_tests.rs index bbc0f50dcd..bdf6931330 100644 --- a/codex-rs/core/src/tools/js_repl/mod_tests.rs +++ b/codex-rs/core/src/tools/js_repl/mod_tests.rs @@ -1028,7 +1028,7 @@ async fn js_repl_waits_for_unawaited_tool_calls_before_completion() -> anyhow::R let marker = turn .cwd - .join(format!("js-repl-unawaited-marker-{}.txt", Uuid::new_v4()))?; + .join(format!("js-repl-unawaited-marker-{}.txt", Uuid::new_v4())); let marker_json = serde_json::to_string(&marker.to_string_lossy().to_string())?; let result = manager .execute( @@ -1073,10 +1073,10 @@ async fn js_repl_persisted_tool_helpers_work_across_cells() -> anyhow::Result<() let global_marker = turn .cwd - .join(format!("js-repl-global-helper-{}.txt", Uuid::new_v4()))?; + .join(format!("js-repl-global-helper-{}.txt", Uuid::new_v4())); let lexical_marker = turn .cwd - .join(format!("js-repl-lexical-helper-{}.txt", Uuid::new_v4()))?; + .join(format!("js-repl-lexical-helper-{}.txt", Uuid::new_v4())); let global_marker_json = serde_json::to_string(&global_marker.to_string_lossy().to_string())?; let lexical_marker_json = serde_json::to_string(&lexical_marker.to_string_lossy().to_string())?; diff --git a/codex-rs/core/src/tools/mod.rs b/codex-rs/core/src/tools/mod.rs index f94f896d20..06c20d6922 100644 --- a/codex-rs/core/src/tools/mod.rs +++ b/codex-rs/core/src/tools/mod.rs @@ -1,16 +1,16 @@ -pub mod code_mode; -pub mod context; -pub mod events; +pub(crate) mod code_mode; +pub(crate) mod context; 
+pub(crate) mod events; pub(crate) mod handlers; -pub mod js_repl; +pub(crate) mod js_repl; pub(crate) mod network_approval; -pub mod orchestrator; -pub mod parallel; -pub mod registry; -pub mod router; -pub mod runtimes; -pub mod sandboxing; -pub mod spec; +pub(crate) mod orchestrator; +pub(crate) mod parallel; +pub(crate) mod registry; +pub(crate) mod router; +pub(crate) mod runtimes; +pub(crate) mod sandboxing; +pub(crate) mod spec; use codex_protocol::exec_output::ExecToolCallOutput; use codex_utils_output_truncation::TruncationPolicy; diff --git a/codex-rs/core/src/tools/router.rs b/codex-rs/core/src/tools/router.rs index 4fd64715bb..aad8c1b7dc 100644 --- a/codex-rs/core/src/tools/router.rs +++ b/codex-rs/core/src/tools/router.rs @@ -8,7 +8,7 @@ use crate::tools::context::ToolPayload; use crate::tools::registry::AnyToolResult; use crate::tools::registry::ToolRegistry; use crate::tools::spec::build_specs_with_discoverable_tools; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::ToolInfo; use codex_protocol::dynamic_tools::DynamicToolSpec; use codex_protocol::models::LocalShellAction; use codex_protocol::models::ResponseItem; diff --git a/codex-rs/core/src/tools/runtimes/apply_patch.rs b/codex-rs/core/src/tools/runtimes/apply_patch.rs index a6dc8885a4..efb9a8a8f5 100644 --- a/codex-rs/core/src/tools/runtimes/apply_patch.rs +++ b/codex-rs/core/src/tools/runtimes/apply_patch.rs @@ -58,7 +58,7 @@ impl ApplyPatchRuntime { ) -> GuardianApprovalRequest { GuardianApprovalRequest::ApplyPatch { id: call_id.to_string(), - cwd: req.action.cwd.clone(), + cwd: req.action.cwd.to_path_buf(), files: req.file_paths.clone(), patch: req.action.patch.clone(), } @@ -101,7 +101,7 @@ impl ApplyPatchRuntime { CODEX_CORE_APPLY_PATCH_ARG1.to_string(), req.action.patch.clone(), ], - cwd: req.action.cwd.clone(), + cwd: req.action.cwd.to_path_buf(), // Run apply_patch with a minimal environment for determinism and to avoid leaks. 
env: HashMap::new(), additional_permissions: req.additional_permissions.clone(), diff --git a/codex-rs/core/src/tools/runtimes/apply_patch_tests.rs b/codex-rs/core/src/tools/runtimes/apply_patch_tests.rs index 3d5ac27cb2..e5831414ca 100644 --- a/codex-rs/core/src/tools/runtimes/apply_patch_tests.rs +++ b/codex-rs/core/src/tools/runtimes/apply_patch_tests.rs @@ -1,5 +1,6 @@ use super::*; use codex_protocol::protocol::GranularApprovalConfig; +use core_test_support::PathBufExt; use pretty_assertions::assert_eq; use std::collections::HashMap; #[cfg(not(target_os = "windows"))] @@ -31,17 +32,17 @@ fn wants_no_sandbox_approval_granular_respects_sandbox_flag() { #[test] fn guardian_review_request_includes_patch_context() { - let path = std::env::temp_dir().join("guardian-apply-patch-test.txt"); + let path = std::env::temp_dir() + .join("guardian-apply-patch-test.txt") + .abs(); let action = ApplyPatchAction::new_add_for_test(&path, "hello".to_string()); - let expected_cwd = action.cwd.clone(); + let expected_cwd = action.cwd.to_path_buf(); let expected_patch = action.patch.clone(); let request = ApplyPatchRequest { action, - file_paths: vec![ - AbsolutePathBuf::from_absolute_path(&path).expect("temp path should be absolute"), - ], + file_paths: vec![path.clone()], changes: HashMap::from([( - path, + path.to_path_buf(), FileChange::Add { content: "hello".to_string(), }, @@ -71,15 +72,15 @@ fn guardian_review_request_includes_patch_context() { #[cfg(not(target_os = "windows"))] #[test] fn build_sandbox_command_prefers_configured_codex_self_exe_for_apply_patch() { - let path = std::env::temp_dir().join("apply-patch-current-exe-test.txt"); + let path = std::env::temp_dir() + .join("apply-patch-current-exe-test.txt") + .abs(); let action = ApplyPatchAction::new_add_for_test(&path, "hello".to_string()); let request = ApplyPatchRequest { action, - file_paths: vec![ - AbsolutePathBuf::from_absolute_path(&path).expect("temp path should be absolute"), - ], + file_paths: 
vec![path.clone()], changes: HashMap::from([( - path, + path.to_path_buf(), FileChange::Add { content: "hello".to_string(), }, @@ -103,15 +104,15 @@ fn build_sandbox_command_prefers_configured_codex_self_exe_for_apply_patch() { #[cfg(not(target_os = "windows"))] #[test] fn build_sandbox_command_falls_back_to_current_exe_for_apply_patch() { - let path = std::env::temp_dir().join("apply-patch-current-exe-test.txt"); + let path = std::env::temp_dir() + .join("apply-patch-current-exe-test.txt") + .abs(); let action = ApplyPatchAction::new_add_for_test(&path, "hello".to_string()); let request = ApplyPatchRequest { action, - file_paths: vec![ - AbsolutePathBuf::from_absolute_path(&path).expect("temp path should be absolute"), - ], + file_paths: vec![path.clone()], changes: HashMap::from([( - path, + path.to_path_buf(), FileChange::Add { content: "hello".to_string(), }, diff --git a/codex-rs/core/src/tools/runtimes/mod.rs b/codex-rs/core/src/tools/runtimes/mod.rs index 53890a89b7..99b5371f17 100644 --- a/codex-rs/core/src/tools/runtimes/mod.rs +++ b/codex-rs/core/src/tools/runtimes/mod.rs @@ -4,6 +4,7 @@ Module: runtimes Concrete ToolRuntime implementations for specific tools. Each runtime stays small and focused and reuses the orchestrator for approvals + sandbox + retry. */ +use crate::exec_env::CODEX_THREAD_ID_ENV_VAR; use crate::path_utils; use crate::shell::Shell; use crate::tools::sandboxing::ToolError; @@ -12,9 +13,9 @@ use codex_sandboxing::SandboxCommand; use std::collections::HashMap; use std::path::Path; -pub mod apply_patch; -pub mod shell; -pub mod unified_exec; +pub(crate) mod apply_patch; +pub(crate) mod shell; +pub(crate) mod unified_exec; /// Shared helper to construct sandbox transform inputs from a tokenized command line. /// Validates that at least a program is present. @@ -48,11 +49,19 @@ pub(crate) fn build_sandbox_command( /// This wrapper script uses POSIX constructs (`if`, `.`, `exec`) so it can /// be run by Bash/Zsh/sh. 
On non-matching commands, or when command cwd does /// not match the snapshot cwd, this is a no-op. +/// +/// `explicit_env_overrides` and `env` are intentionally separate inputs. +/// `explicit_env_overrides` contains policy-driven shell env overrides that +/// should win after the snapshot is sourced, while `env` is the full live exec +/// environment. We need access to both so snapshot restore logic can preserve +/// runtime-only vars like `CODEX_THREAD_ID` without pretending they came from +/// the explicit override policy. pub(crate) fn maybe_wrap_shell_lc_with_snapshot( command: &[String], session_shell: &Shell, cwd: &Path, explicit_env_overrides: &HashMap, + env: &HashMap, ) -> Vec { if cfg!(windows) { return command.to_vec(); @@ -95,7 +104,11 @@ pub(crate) fn maybe_wrap_shell_lc_with_snapshot( .iter() .map(|arg| format!(" '{}'", shell_single_quote(arg))) .collect::(); - let (override_captures, override_exports) = build_override_exports(explicit_env_overrides); + let mut override_env = explicit_env_overrides.clone(); + if let Some(thread_id) = env.get(CODEX_THREAD_ID_ENV_VAR) { + override_env.insert(CODEX_THREAD_ID_ENV_VAR.to_string(), thread_id.clone()); + } + let (override_captures, override_exports) = build_override_exports(&override_env); let rewritten_script = if override_exports.is_empty() { format!( "if . 
'{snapshot_path}' >/dev/null 2>&1; then :; fi\n\nexec '{original_shell}' -c '{original_script}'{trailing_args}" diff --git a/codex-rs/core/src/tools/runtimes/mod_tests.rs b/codex-rs/core/src/tools/runtimes/mod_tests.rs index dbc341d1de..22c22770ab 100644 --- a/codex-rs/core/src/tools/runtimes/mod_tests.rs +++ b/codex-rs/core/src/tools/runtimes/mod_tests.rs @@ -42,8 +42,13 @@ fn maybe_wrap_shell_lc_with_snapshot_bootstraps_in_user_shell() { "echo hello".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::new(), + ); assert_eq!(rewritten[0], "/bin/zsh"); assert_eq!(rewritten[1], "-c"); @@ -68,8 +73,13 @@ fn maybe_wrap_shell_lc_with_snapshot_escapes_single_quotes() { "echo 'hello'".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::new(), + ); assert!(rewritten[2].contains(r#"exec '/bin/bash' -c 'echo '"'"'hello'"'"''"#)); } @@ -91,8 +101,13 @@ fn maybe_wrap_shell_lc_with_snapshot_uses_bash_bootstrap_shell() { "echo hello".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::new(), + ); assert_eq!(rewritten[0], "/bin/bash"); assert_eq!(rewritten[1], "-c"); @@ -117,8 +132,13 @@ fn maybe_wrap_shell_lc_with_snapshot_uses_sh_bootstrap_shell() { "echo hello".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + 
&HashMap::new(), + &HashMap::new(), + ); assert_eq!(rewritten[0], "/bin/sh"); assert_eq!(rewritten[1], "-c"); @@ -145,8 +165,13 @@ fn maybe_wrap_shell_lc_with_snapshot_preserves_trailing_args() { "arg1".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::new(), + ); assert!( rewritten[2] @@ -171,8 +196,13 @@ fn maybe_wrap_shell_lc_with_snapshot_skips_when_cwd_mismatch() { "echo hello".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, &command_cwd, &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + &command_cwd, + &HashMap::new(), + &HashMap::new(), + ); assert_eq!(rewritten, command); } @@ -195,8 +225,13 @@ fn maybe_wrap_shell_lc_with_snapshot_accepts_dot_alias_cwd() { ]; let command_cwd = dir.path().join("."); - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, &command_cwd, &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + &command_cwd, + &HashMap::new(), + &HashMap::new(), + ); assert_eq!(rewritten[0], "/bin/zsh"); assert_eq!(rewritten[1], "-c"); @@ -231,6 +266,7 @@ fn maybe_wrap_shell_lc_with_snapshot_restores_explicit_override_precedence() { &session_shell, dir.path(), &explicit_env_overrides, + &HashMap::from([("TEST_ENV_SNAPSHOT".to_string(), "worktree".to_string())]), ); let output = Command::new(&rewritten[0]) .args(&rewritten[1..]) @@ -245,6 +281,43 @@ fn maybe_wrap_shell_lc_with_snapshot_restores_explicit_override_precedence() { ); } +#[test] +fn maybe_wrap_shell_lc_with_snapshot_restores_codex_thread_id_from_env() { + let dir = tempdir().expect("create temp dir"); + let snapshot_path = dir.path().join("snapshot.sh"); + std::fs::write( + &snapshot_path, + "# Snapshot file\nexport 
CODEX_THREAD_ID='parent-thread'\n", + ) + .expect("write snapshot"); + let session_shell = shell_with_snapshot( + ShellType::Bash, + "/bin/bash", + snapshot_path, + dir.path().to_path_buf(), + ); + let command = vec![ + "/bin/bash".to_string(), + "-lc".to_string(), + "printf '%s' \"$CODEX_THREAD_ID\"".to_string(), + ]; + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::from([("CODEX_THREAD_ID".to_string(), "nested-thread".to_string())]), + ); + let output = Command::new(&rewritten[0]) + .args(&rewritten[1..]) + .env("CODEX_THREAD_ID", "nested-thread") + .output() + .expect("run rewritten command"); + + assert!(output.status.success(), "command failed: {output:?}"); + assert_eq!(String::from_utf8_lossy(&output.stdout), "nested-thread"); +} + #[test] fn maybe_wrap_shell_lc_with_snapshot_keeps_snapshot_path_without_override() { let dir = tempdir().expect("create temp dir"); @@ -265,8 +338,13 @@ fn maybe_wrap_shell_lc_with_snapshot_keeps_snapshot_path_without_override() { "-lc".to_string(), "printf '%s' \"$PATH\"".to_string(), ]; - let rewritten = - maybe_wrap_shell_lc_with_snapshot(&command, &session_shell, dir.path(), &HashMap::new()); + let rewritten = maybe_wrap_shell_lc_with_snapshot( + &command, + &session_shell, + dir.path(), + &HashMap::new(), + &HashMap::new(), + ); let output = Command::new(&rewritten[0]) .args(&rewritten[1..]) .output() @@ -302,6 +380,7 @@ fn maybe_wrap_shell_lc_with_snapshot_applies_explicit_path_override() { &session_shell, dir.path(), &explicit_env_overrides, + &HashMap::from([("PATH".to_string(), "/worktree/bin".to_string())]), ); let output = Command::new(&rewritten[0]) .args(&rewritten[1..]) @@ -342,6 +421,10 @@ fn maybe_wrap_shell_lc_with_snapshot_does_not_embed_override_values_in_argv() { &session_shell, dir.path(), &explicit_env_overrides, + &HashMap::from([( + "OPENAI_API_KEY".to_string(), + "super-secret-value".to_string(), + )]), ); 
assert!(!rewritten[2].contains("super-secret-value")); @@ -386,6 +469,7 @@ fn maybe_wrap_shell_lc_with_snapshot_preserves_unset_override_variables() { &session_shell, dir.path(), &explicit_env_overrides, + &HashMap::new(), ); let output = Command::new(&rewritten[0]) diff --git a/codex-rs/core/src/tools/runtimes/shell.rs b/codex-rs/core/src/tools/runtimes/shell.rs index ba8713254a..7b0c81ac60 100644 --- a/codex-rs/core/src/tools/runtimes/shell.rs +++ b/codex-rs/core/src/tools/runtimes/shell.rs @@ -226,6 +226,7 @@ impl ToolRuntime for ShellRuntime { session_shell.as_ref(), &req.cwd, &req.explicit_env_overrides, + &req.env, ); let command = if matches!(session_shell.shell_type, ShellType::PowerShell) { prefix_powershell_script_with_utf8(&command) diff --git a/codex-rs/core/src/tools/runtimes/unified_exec.rs b/codex-rs/core/src/tools/runtimes/unified_exec.rs index b913d00c72..2aa0941fe1 100644 --- a/codex-rs/core/src/tools/runtimes/unified_exec.rs +++ b/codex-rs/core/src/tools/runtimes/unified_exec.rs @@ -230,6 +230,7 @@ impl<'a> ToolRuntime for UnifiedExecRunt session_shell.as_ref(), &req.cwd, &req.explicit_env_overrides, + &req.env, ); let command = if matches!(session_shell.shell_type, ShellType::PowerShell) { prefix_powershell_script_with_utf8(&command) @@ -302,7 +303,7 @@ impl<'a> ToolRuntime for UnifiedExecRunt &prepared.exec_request, req.tty, prepared.spawn_lifecycle, - ctx.turn.environment.as_ref(), + environment.as_ref(), ) .await .map_err(|err| match err { @@ -332,13 +333,18 @@ impl<'a> ToolRuntime for UnifiedExecRunt let exec_env = attempt .env_for(command, options, req.network.as_ref()) .map_err(|err| ToolError::Codex(err.into()))?; + let Some(environment) = ctx.turn.environment.as_ref() else { + return Err(ToolError::Rejected( + "exec_command is unavailable in this session".to_string(), + )); + }; self.manager .open_session_with_exec_env( req.process_id, &exec_env, req.tty, Box::new(NoopSpawnLifecycle), - ctx.turn.environment.as_ref(), + 
environment.as_ref(), ) .await .map_err(|err| match err { diff --git a/codex-rs/core/src/tools/spec.rs b/codex-rs/core/src/tools/spec.rs index 90b9a382aa..37a68d4081 100644 --- a/codex-rs/core/src/tools/spec.rs +++ b/codex-rs/core/src/tools/spec.rs @@ -5,8 +5,8 @@ use crate::tools::handlers::multi_agents_common::DEFAULT_WAIT_TIMEOUT_MS; use crate::tools::handlers::multi_agents_common::MAX_WAIT_TIMEOUT_MS; use crate::tools::handlers::multi_agents_common::MIN_WAIT_TIMEOUT_MS; use crate::tools::registry::ToolRegistryBuilder; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; -use codex_mcp::mcp_connection_manager::ToolInfo; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::ToolInfo; use codex_protocol::dynamic_tools::DynamicToolSpec; use codex_tools::DiscoverableTool; use codex_tools::ToolHandlerKind; diff --git a/codex-rs/core/src/tools/spec_tests.rs b/codex-rs/core/src/tools/spec_tests.rs index 4615b45319..13fa4930db 100644 --- a/codex-rs/core/src/tools/spec_tests.rs +++ b/codex-rs/core/src/tools/spec_tests.rs @@ -8,7 +8,7 @@ use crate::tools::router::ToolRouterParams; use codex_app_server_protocol::AppInfo; use codex_features::Feature; use codex_features::Features; -use codex_mcp::mcp::CODEX_APPS_MCP_SERVER_NAME; +use codex_mcp::CODEX_APPS_MCP_SERVER_NAME; use codex_models_manager::bundled_models_response; use codex_models_manager::model_info::with_config_overrides; use codex_protocol::config_types::WebSearchMode; diff --git a/codex-rs/core/src/turn_timing.rs b/codex-rs/core/src/turn_timing.rs index c68f16e451..6c47d3b528 100644 --- a/codex-rs/core/src/turn_timing.rs +++ b/codex-rs/core/src/turn_timing.rs @@ -1,8 +1,10 @@ use std::time::Duration; use std::time::Instant; +use std::time::SystemTime; +use std::time::UNIX_EPOCH; -use codex_otel::metrics::names::TURN_TTFM_DURATION_METRIC; -use codex_otel::metrics::names::TURN_TTFT_DURATION_METRIC; +use codex_otel::TURN_TTFM_DURATION_METRIC; +use codex_otel::TURN_TTFT_DURATION_METRIC; use 
codex_protocol::items::TurnItem; use codex_protocol::models::ResponseItem; use tokio::sync::Mutex; @@ -45,6 +47,7 @@ pub(crate) struct TurnTimingState { #[derive(Debug, Default)] struct TurnTimingStateInner { started_at: Option, + started_at_unix_secs: Option, first_token_at: Option, first_message_at: Option, } @@ -53,10 +56,24 @@ impl TurnTimingState { pub(crate) async fn mark_turn_started(&self, started_at: Instant) { let mut state = self.state.lock().await; state.started_at = Some(started_at); + state.started_at_unix_secs = Some(now_unix_timestamp_secs()); state.first_token_at = None; state.first_message_at = None; } + pub(crate) async fn started_at_unix_secs(&self) -> Option { + self.state.lock().await.started_at_unix_secs + } + + pub(crate) async fn completed_at_and_duration_ms(&self) -> (Option, Option) { + let state = self.state.lock().await; + let completed_at = Some(now_unix_timestamp_secs()); + let duration_ms = state + .started_at + .map(|started_at| i64::try_from(started_at.elapsed().as_millis()).unwrap_or(i64::MAX)); + (completed_at, duration_ms) + } + pub(crate) async fn record_ttft_for_response_event( &self, event: &ResponseEvent, @@ -77,6 +94,13 @@ impl TurnTimingState { } } +fn now_unix_timestamp_secs() -> i64 { + let duration = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default(); + i64::try_from(duration.as_secs()).unwrap_or(i64::MAX) +} + impl TurnTimingStateInner { fn record_turn_ttft(&mut self) -> Option { if self.first_token_at.is_some() { diff --git a/codex-rs/core/src/unified_exec/mod_tests.rs b/codex-rs/core/src/unified_exec/mod_tests.rs index 8348614f95..a5db73bf45 100644 --- a/codex-rs/core/src/unified_exec/mod_tests.rs +++ b/codex-rs/core/src/unified_exec/mod_tests.rs @@ -98,7 +98,7 @@ async fn exec_command_with_tty( &request, tty, Box::new(NoopSpawnLifecycle), - turn.environment.as_ref(), + turn.environment.as_ref().expect("turn environment"), ) .await?, ); @@ -593,7 +593,7 @@ async fn 
remote_exec_server_rejects_inherited_fd_launches() -> anyhow::Result<() let remote_test_env = remote_test_env().await?; let (_, mut turn) = make_session_and_context().await; - turn.environment = Arc::new(remote_test_env.environment().clone()); + turn.environment = Some(Arc::new(remote_test_env.environment().clone())); let request = test_exec_request( &turn, @@ -611,7 +611,7 @@ async fn remote_exec_server_rejects_inherited_fd_launches() -> anyhow::Result<() Box::new(TestSpawnLifecycle { inherited_fds: vec![42], }), - turn.environment.as_ref(), + turn.environment.as_ref().expect("turn environment"), ) .await .expect_err("expected inherited fd rejection"); diff --git a/codex-rs/core/src/unified_exec/process_manager.rs b/codex-rs/core/src/unified_exec/process_manager.rs index f99a39dd12..43ba2bc96e 100644 --- a/codex-rs/core/src/unified_exec/process_manager.rs +++ b/codex-rs/core/src/unified_exec/process_manager.rs @@ -582,18 +582,18 @@ impl UnifiedExecProcessManager { pub(crate) async fn open_session_with_exec_env( &self, process_id: i32, - env: &ExecRequest, + request: &ExecRequest, tty: bool, mut spawn_lifecycle: SpawnLifecycleHandle, environment: &codex_exec_server::Environment, ) -> Result { - let (program, args) = env + let (program, args) = request .command .split_first() .ok_or(UnifiedExecError::MissingCommandLine)?; let inherited_fds = spawn_lifecycle.inherited_fds(); - if environment.exec_server_url().is_some() { + if environment.is_remote() { if !inherited_fds.is_empty() { return Err(UnifiedExecError::create_process( "remote exec-server does not support inherited file descriptors".to_string(), @@ -604,25 +604,25 @@ impl UnifiedExecProcessManager { .get_exec_backend() .start(codex_exec_server::ExecParams { process_id: exec_server_process_id(process_id).into(), - argv: env.command.clone(), - cwd: env.cwd.clone(), - env: env.env.clone(), + argv: request.command.clone(), + cwd: request.cwd.clone(), + env: request.env.clone(), tty, - arg0: env.arg0.clone(), + arg0: request.arg0.clone(), 
- sandbox: codex_sandboxing::SandboxLaunchConfig::no_sandbox(env.cwd.clone()), + sandbox: codex_sandboxing::SandboxLaunchConfig::no_sandbox(request.cwd.clone()), }) .await .map_err(|err| UnifiedExecError::create_process(err.to_string()))?; - return UnifiedExecProcess::from_remote_started(started, env.sandbox).await; + return UnifiedExecProcess::from_remote_started(started, request.sandbox).await; } let spawn_result = if tty { codex_utils_pty::pty::spawn_process_with_inherited_fds( program, args, - env.cwd.as_path(), - &env.env, - &env.arg0, + request.cwd.as_path(), + &request.env, + &request.arg0, codex_utils_pty::TerminalSize::default(), &inherited_fds, ) @@ -631,9 +631,9 @@ codex_utils_pty::pipe::spawn_process_no_stdin_with_inherited_fds( program, args, - env.cwd.as_path(), - &env.env, - &env.arg0, + request.cwd.as_path(), + &request.env, + &request.arg0, &inherited_fds, ) .await @@ -641,7 +641,7 @@ let spawned = spawn_result.map_err(|err| UnifiedExecError::create_process(err.to_string()))?; spawn_lifecycle.after_spawn(); - UnifiedExecProcess::from_spawned(spawned, env.sandbox, spawn_lifecycle).await + UnifiedExecProcess::from_spawned(spawned, request.sandbox, spawn_lifecycle).await } pub(crate) async fn open_session_with_remote_exec( diff --git a/codex-rs/core/src/windows_sandbox.rs b/codex-rs/core/src/windows_sandbox.rs index 0def23d00c..65230b1a17 100644 --- a/codex-rs/core/src/windows_sandbox.rs +++ b/codex-rs/core/src/windows_sandbox.rs @@ -1,7 +1,7 @@ use crate::config::Config; -use crate::config::ConfigToml; use crate::config::edit::ConfigEditsBuilder; -use crate::config::profile::ConfigProfile; +use codex_config::config_toml::ConfigToml; +use codex_config::profile_toml::ConfigProfile; use codex_config::types::WindowsSandboxModeToml; use codex_features::Feature; use codex_features::Features; @@ -363,7 +363,7 @@ fn emit_windows_sandbox_setup_success_metrics( originator_tag: &str, duration: std::time::Duration, ) { - let Some(metrics) = codex_otel::metrics::global() else { + 
let Some(metrics) = codex_otel::global() else { return; }; let mode_tag = windows_sandbox_setup_mode_tag(mode); @@ -389,7 +389,7 @@ fn emit_windows_sandbox_setup_failure_metrics( duration: std::time::Duration, _err: &anyhow::Error, ) { - let Some(metrics) = codex_otel::metrics::global() else { + let Some(metrics) = codex_otel::global() else { return; }; let mode_tag = windows_sandbox_setup_mode_tag(mode); diff --git a/codex-rs/core/tests/common/lib.rs b/codex-rs/core/tests/common/lib.rs index 8aff42e96f..d16054e6db 100644 --- a/codex-rs/core/tests/common/lib.rs +++ b/codex-rs/core/tests/common/lib.rs @@ -13,8 +13,9 @@ use codex_core::config::Config; use codex_core::config::ConfigBuilder; use codex_core::config::ConfigOverrides; use codex_utils_absolute_path::AbsolutePathBuf; +pub use codex_utils_absolute_path::test_support::PathBufExt; +pub use codex_utils_absolute_path::test_support::PathExt; use regex_lite::Regex; -use std::path::Path; use std::path::PathBuf; pub mod apps_test_server; @@ -105,26 +106,6 @@ pub fn test_absolute_path(unix_path: &str) -> AbsolutePathBuf { test_absolute_path_with_windows(unix_path, /*windows_path*/ None) } -pub trait PathExt { - fn abs(&self) -> AbsolutePathBuf; -} - -impl PathExt for Path { - fn abs(&self) -> AbsolutePathBuf { - AbsolutePathBuf::try_from(self.to_path_buf()).expect("path should already be absolute") - } -} - -pub trait PathBufExt { - fn abs(&self) -> AbsolutePathBuf; -} - -impl PathBufExt for PathBuf { - fn abs(&self) -> AbsolutePathBuf { - self.as_path().abs() - } -} - pub trait TempDirExt { fn abs(&self) -> AbsolutePathBuf; } diff --git a/codex-rs/core/tests/common/test_codex.rs b/codex-rs/core/tests/common/test_codex.rs index 10957fd9f0..bffd0cbfd6 100644 --- a/codex-rs/core/tests/common/test_codex.rs +++ b/codex-rs/core/tests/common/test_codex.rs @@ -1,3 +1,4 @@ +use std::future::Future; use std::mem::swap; use std::path::Path; use std::path::PathBuf; @@ -34,12 +35,12 @@ use 
codex_protocol::protocol::SessionConfiguredEvent; use codex_protocol::protocol::SessionSource; use codex_protocol::user_input::UserInput; use codex_utils_absolute_path::AbsolutePathBuf; +use futures::future::BoxFuture; use serde_json::Value; use tempfile::TempDir; use wiremock::MockServer; use crate::PathBufExt; -use crate::PathExt; use crate::RemoteEnvConfig; use crate::TempDirExt; use crate::get_remote_test_env; @@ -55,6 +56,8 @@ use wiremock::matchers::path_regex; type ConfigMutator = dyn FnOnce(&mut Config) + Send; type PreBuildHook = dyn FnOnce(&Path) + Send + 'static; +type WorkspaceSetup = dyn FnOnce(AbsolutePathBuf, Arc) -> BoxFuture<'static, Result<()>> + + Send; const TEST_MODEL_WITH_EXPERIMENTAL_TOOLS: &str = "test-gpt-5.1-codex"; const REMOTE_EXEC_SERVER_START_TIMEOUT: Duration = Duration::from_secs(5); const REMOTE_EXEC_SERVER_POLL_INTERVAL: Duration = Duration::from_millis(25); @@ -97,24 +100,28 @@ impl RemoteExecServerProcess { #[derive(Debug)] pub struct TestEnv { environment: codex_exec_server::Environment, - cwd: PathBuf, - _local_cwd_temp_dir: Option, + cwd: AbsolutePathBuf, + local_cwd_temp_dir: Option>, _remote_exec_server_process: Option, } impl TestEnv { pub async fn local() -> Result { - let local_cwd_temp_dir = TempDir::new()?; - let cwd = local_cwd_temp_dir.path().to_path_buf(); + let local_cwd_temp_dir = Arc::new(TempDir::new()?); + let cwd = local_cwd_temp_dir.abs(); let environment = codex_exec_server::Environment::create(/*exec_server_url*/ None).await?; Ok(Self { environment, cwd, - _local_cwd_temp_dir: Some(local_cwd_temp_dir), + local_cwd_temp_dir: Some(local_cwd_temp_dir), _remote_exec_server_process: None, }) } + pub fn cwd(&self) -> &AbsolutePathBuf { + &self.cwd + } + pub fn environment(&self) -> &codex_exec_server::Environment { &self.environment } @@ -122,6 +129,10 @@ impl TestEnv { pub fn exec_server_url(&self) -> Option<&str> { self.environment.exec_server_url() } + + fn local_cwd_temp_dir(&self) -> Option> { + 
self.local_cwd_temp_dir.clone() + } } pub async fn test_env() -> Result { @@ -134,16 +145,13 @@ pub async fn test_env() -> Result { let cwd = remote_aware_cwd_path(); environment .get_filesystem() - .create_directory( - &absolute_path(&cwd)?, - CreateDirectoryOptions { recursive: true }, - ) + .create_directory(&cwd, CreateDirectoryOptions { recursive: true }) .await?; - remote_process.process.register_cleanup_path(&cwd); + remote_process.process.register_cleanup_path(cwd.as_path()); Ok(TestEnv { environment, cwd, - _local_cwd_temp_dir: None, + local_cwd_temp_dir: None, _remote_exec_server_process: Some(remote_process.process), }) } @@ -201,11 +209,12 @@ echo $!" }) } -fn remote_aware_cwd_path() -> PathBuf { +fn remote_aware_cwd_path() -> AbsolutePathBuf { PathBuf::from(format!( "/tmp/codex-core-test-cwd-{}", remote_exec_server_instance_id() )) + .abs() } fn wait_for_remote_listen_url(container_name: &str, stdout_path: &str) -> Result { @@ -299,10 +308,6 @@ fn docker_command_capture_stdout(args: [&str; N]) -> Result Result { - Ok(path.abs()) -} - /// A collection of different ways the model can output an apply_patch call #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum ApplyPatchModelOutput { @@ -326,6 +331,7 @@ pub struct TestCodexBuilder { config_mutators: Vec>, auth: CodexAuth, pre_build_hooks: Vec>, + workspace_setups: Vec>, home: Option>, user_shell_override: Option, } @@ -359,6 +365,16 @@ impl TestCodexBuilder { self } + pub fn with_workspace_setup(mut self, setup: F) -> Self + where + F: FnOnce(AbsolutePathBuf, Arc) -> Fut + Send + 'static, + Fut: Future> + Send + 'static, + { + self.workspace_setups + .push(Box::new(move |cwd, fs| Box::pin(setup(cwd, fs)))); + self + } + pub fn with_home(mut self, home: Arc) -> Self { self.home = Some(home); self @@ -382,25 +398,24 @@ impl TestCodexBuilder { Some(home) => home, None => Arc::new(TempDir::new()?), }; - Box::pin(self.build_with_home(server, home, /*resume_from*/ None)).await + let base_url = 
format!("{}/v1", server.uri()); + let test_env = TestEnv::local().await?; + Box::pin(self.build_with_home_and_base_url(base_url, home, /*resume_from*/ None, test_env)) + .await } pub async fn build_remote_aware( &mut self, server: &wiremock::MockServer, ) -> anyhow::Result { - let test_env = test_env().await?; let home = match self.home.clone() { Some(home) => home, None => Arc::new(TempDir::new()?), }; let base_url = format!("{}/v1", server.uri()); - let cwd = test_env.cwd.to_path_buf(); - self.config_mutators.push(Box::new(move |config| { - config.cwd = cwd.abs(); - })); - let (config, cwd) = self.prepare_config(base_url, &home).await?; - Box::pin(self.build_from_config(config, cwd, home, /*resume_from*/ None, test_env)).await + let test_env = test_env().await?; + Box::pin(self.build_with_home_and_base_url(base_url, home, /*resume_from*/ None, test_env)) + .await } pub async fn build_with_streaming_server( @@ -412,10 +427,12 @@ impl TestCodexBuilder { Some(home) => home, None => Arc::new(TempDir::new()?), }; + let test_env = TestEnv::local().await?; Box::pin(self.build_with_home_and_base_url( format!("{base_url}/v1"), home, /*resume_from*/ None, + test_env, )) .await } @@ -435,7 +452,9 @@ impl TestCodexBuilder { config.model_provider.supports_websockets = true; config.experimental_realtime_ws_model = Some("realtime-test-model".to_string()); })); - Box::pin(self.build_with_home_and_base_url(base_url, home, /*resume_from*/ None)).await + let test_env = TestEnv::local().await?; + Box::pin(self.build_with_home_and_base_url(base_url, home, /*resume_from*/ None, test_env)) + .await } pub async fn resume( @@ -443,19 +462,10 @@ impl TestCodexBuilder { server: &wiremock::MockServer, home: Arc, rollout_path: PathBuf, - ) -> anyhow::Result { - Box::pin(self.build_with_home(server, home, Some(rollout_path))).await - } - - async fn build_with_home( - &mut self, - server: &wiremock::MockServer, - home: Arc, - resume_from: Option, ) -> anyhow::Result { let base_url = 
format!("{}/v1", server.uri()); - let (config, cwd) = self.prepare_config(base_url, &home).await?; - Box::pin(self.build_from_config(config, cwd, home, resume_from, TestEnv::local().await?)) + let test_env = TestEnv::local().await?; + Box::pin(self.build_with_home_and_base_url(base_url, home, Some(rollout_path), test_env)) .await } @@ -464,10 +474,30 @@ impl TestCodexBuilder { base_url: String, home: Arc, resume_from: Option, + test_env: TestEnv, ) -> anyhow::Result { - let (config, cwd) = self.prepare_config(base_url, &home).await?; - Box::pin(self.build_from_config(config, cwd, home, resume_from, TestEnv::local().await?)) - .await + let (config, fallback_cwd) = self + .prepare_config(base_url, &home, test_env.cwd().clone()) + .await?; + let environment_manager = Arc::new(codex_exec_server::EnvironmentManager::new( + test_env.exec_server_url().map(str::to_owned), + )); + let file_system = test_env.environment().get_filesystem(); + let mut workspace_setups = vec![]; + swap(&mut self.workspace_setups, &mut workspace_setups); + for setup in workspace_setups { + setup(config.cwd.clone(), Arc::clone(&file_system)).await?; + } + let cwd = test_env.local_cwd_temp_dir().unwrap_or(fallback_cwd); + Box::pin(self.build_from_config( + config, + cwd, + home, + resume_from, + test_env, + environment_manager, + )) + .await } async fn build_from_config( @@ -477,11 +507,9 @@ impl TestCodexBuilder { home: Arc, resume_from: Option, test_env: TestEnv, + environment_manager: Arc, ) -> anyhow::Result { let auth = self.auth.clone(); - let environment_manager = Arc::new(codex_exec_server::EnvironmentManager::new( - test_env.exec_server_url().map(str::to_owned), - )); let thread_manager = if config.model_catalog.is_some() { ThreadManager::new( &config, @@ -553,6 +581,7 @@ impl TestCodexBuilder { &mut self, base_url: String, home: &TempDir, + cwd_override: AbsolutePathBuf, ) -> anyhow::Result<(Config, Arc)> { let model_provider = ModelProviderInfo { base_url: Some(base_url), @@ -563,7 
+592,7 @@ impl TestCodexBuilder { }; let cwd = Arc::new(TempDir::new()?); let mut config = load_default_config_for_test(home).await; - config.cwd = cwd.abs(); + config.cwd = cwd_override; config.model_provider = model_provider; for hook in self.pre_build_hooks.drain(..) { hook(home.path()); @@ -897,6 +926,7 @@ pub fn test_codex() -> TestCodexBuilder { config_mutators: vec![], auth: CodexAuth::from_api_key("dummy"), pre_build_hooks: vec![], + workspace_setups: vec![], home: None, user_shell_override: None, } diff --git a/codex-rs/core/tests/common/test_codex_exec.rs b/codex-rs/core/tests/common/test_codex_exec.rs index 5fe47018a6..ad32bcb025 100644 --- a/codex-rs/core/tests/common/test_codex_exec.rs +++ b/codex-rs/core/tests/common/test_codex_exec.rs @@ -17,6 +17,7 @@ impl TestCodexExecBuilder { ); cmd.current_dir(self.cwd.path()) .env("CODEX_HOME", self.home.path()) + .env("CODEX_SQLITE_HOME", self.home.path()) .env(CODEX_API_KEY_ENV_VAR, "dummy"); cmd } diff --git a/codex-rs/core/tests/responses_headers.rs b/codex-rs/core/tests/responses_headers.rs index 7a6e73ed08..03870beab2 100644 --- a/codex-rs/core/tests/responses_headers.rs +++ b/codex-rs/core/tests/responses_headers.rs @@ -31,6 +31,8 @@ fn normalize_git_remote_url(url: &str) -> String { .to_string() } +const TEST_INSTALLATION_ID: &str = "11111111-1111-4111-8111-111111111111"; + #[tokio::test] async fn responses_stream_includes_subagent_header_on_review() { core_test_support::skip_if_no_network!(); @@ -98,6 +100,7 @@ async fn responses_stream_includes_subagent_header_on_review() { let client = ModelClient::new( /*auth_manager*/ None, conversation_id, + /*installation_id*/ TEST_INSTALLATION_ID.to_string(), provider.clone(), session_source, config.model_verbosity, @@ -147,6 +150,10 @@ async fn responses_stream_includes_subagent_header_on_review() { Some(expected_window_id.as_str()) ); assert_eq!(request.header("x-codex-parent-thread-id"), None); + assert_eq!( + 
request.body_json()["client_metadata"]["x-codex-installation-id"].as_str(), + Some(TEST_INSTALLATION_ID) + ); assert_eq!(request.header("x-codex-sandbox"), None); } @@ -218,6 +225,7 @@ async fn responses_stream_includes_subagent_header_on_other() { let client = ModelClient::new( /*auth_manager*/ None, conversation_id, + /*installation_id*/ TEST_INSTALLATION_ID.to_string(), provider.clone(), session_source, config.model_verbosity, @@ -331,6 +339,7 @@ async fn responses_respects_model_info_overrides_from_config() { let client = ModelClient::new( /*auth_manager*/ None, conversation_id, + /*installation_id*/ TEST_INSTALLATION_ID.to_string(), provider.clone(), session_source, config.model_verbosity, diff --git a/codex-rs/core/tests/suite/agents_md.rs b/codex-rs/core/tests/suite/agents_md.rs new file mode 100644 index 0000000000..ed62c90388 --- /dev/null +++ b/codex-rs/core/tests/suite/agents_md.rs @@ -0,0 +1,121 @@ +use anyhow::Result; +use codex_exec_server::CreateDirectoryOptions; +use core_test_support::responses::ev_completed; +use core_test_support::responses::ev_response_created; +use core_test_support::responses::mount_sse_once; +use core_test_support::responses::sse; +use core_test_support::responses::start_mock_server; +use core_test_support::test_codex::TestCodexBuilder; +use core_test_support::test_codex::test_codex; + +async fn agents_instructions(mut builder: TestCodexBuilder) -> Result { + let server = start_mock_server().await; + let resp_mock = mount_sse_once( + &server, + sse(vec![ev_response_created("resp1"), ev_completed("resp1")]), + ) + .await; + + let test = builder.build_remote_aware(&server).await?; + test.submit_turn("hello").await?; + + let request = resp_mock.single_request(); + request + .message_input_texts("user") + .into_iter() + .find(|text| text.starts_with("# AGENTS.md instructions for ")) + .ok_or_else(|| anyhow::anyhow!("instructions message not found")) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
agents_override_is_preferred_over_agents_md() -> Result<()> { + let instructions = + agents_instructions(test_codex().with_workspace_setup(|cwd, fs| async move { + let agents_md = cwd.join("AGENTS.md"); + let override_md = cwd.join("AGENTS.override.md"); + fs.write_file(&agents_md, b"base doc".to_vec()).await?; + fs.write_file(&override_md, b"override doc".to_vec()) + .await?; + Ok::<(), anyhow::Error>(()) + })) + .await?; + + assert!( + instructions.contains("override doc"), + "expected AGENTS.override.md contents: {instructions}" + ); + assert!( + !instructions.contains("base doc"), + "expected AGENTS.md to be ignored when override exists: {instructions}" + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn configured_fallback_is_used_when_agents_candidate_is_directory() -> Result<()> { + let instructions = agents_instructions( + test_codex() + .with_config(|config| { + config.project_doc_fallback_filenames = vec!["WORKFLOW.md".to_string()]; + }) + .with_workspace_setup(|cwd, fs| async move { + let agents_dir = cwd.join("AGENTS.md"); + let fallback = cwd.join("WORKFLOW.md"); + fs.create_directory(&agents_dir, CreateDirectoryOptions { recursive: true }) + .await?; + fs.write_file(&fallback, b"fallback doc".to_vec()).await?; + Ok::<(), anyhow::Error>(()) + }), + ) + .await?; + + assert!( + instructions.contains("fallback doc"), + "expected fallback doc contents: {instructions}" + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn agents_docs_are_concatenated_from_project_root_to_cwd() -> Result<()> { + let instructions = agents_instructions( + test_codex() + .with_config(|config| { + config.cwd = config.cwd.join("nested/workspace"); + }) + .with_workspace_setup(|cwd, fs| async move { + let nested = cwd.clone(); + let root = nested + .parent() + .and_then(|parent| parent.parent()) + .expect("nested workspace should have a project root ancestor"); + let root_agents = root.join("AGENTS.md"); 
+ let git_marker = root.join(".git"); + let nested_agents = nested.join("AGENTS.md"); + + fs.create_directory(&nested, CreateDirectoryOptions { recursive: true }) + .await?; + fs.write_file(&root_agents, b"root doc".to_vec()).await?; + fs.write_file(&git_marker, b"gitdir: /tmp/mock-git-dir\n".to_vec()) + .await?; + fs.write_file(&nested_agents, b"child doc".to_vec()).await?; + Ok::<(), anyhow::Error>(()) + }), + ) + .await?; + + let root_pos = instructions + .find("root doc") + .expect("expected root doc in AGENTS instructions"); + let child_pos = instructions + .find("child doc") + .expect("expected child doc in AGENTS instructions"); + assert!( + root_pos < child_pos, + "expected root doc before child doc: {instructions}" + ); + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs index 0148d62bfe..15303a58ff 100644 --- a/codex-rs/core/tests/suite/client.rs +++ b/codex-rs/core/tests/suite/client.rs @@ -1,10 +1,10 @@ +use codex_config::types::AuthCredentialsStoreMode; use codex_core::ModelClient; use codex_core::NewThread; use codex_core::Prompt; use codex_core::ResponseEvent; use codex_core::ThreadManager; use codex_features::Feature; -use codex_login::AuthCredentialsStoreMode; use codex_login::AuthManager; use codex_login::CodexAuth; use codex_login::default_client::originator; @@ -81,6 +81,8 @@ use wiremock::matchers::method; use wiremock::matchers::path; use wiremock::matchers::query_param; +const INSTALLATION_ID_FILENAME: &str = "installation_id"; + #[expect(clippy::unwrap_used)] fn assert_message_role(request_body: &serde_json::Value, role: &str) { assert_eq!(request_body["role"].as_str().unwrap(), role); @@ -760,10 +762,18 @@ async fn includes_conversation_id_and_model_headers_in_request() { .header("authorization") .expect("authorization header"); let request_originator = request.header("originator").expect("originator header"); + let request_body = request.body_json(); + let installation_id = + 
std::fs::read_to_string(test.codex_home_path().join(INSTALLATION_ID_FILENAME)) + .expect("read installation id"); assert_eq!(request_session_id, session_id.to_string()); assert_eq!(request_originator, originator().value); assert_eq!(request_authorization, "Bearer Test API Key"); + assert_eq!( + request_body["client_metadata"]["x-codex-installation-id"].as_str(), + Some(installation_id.as_str()) + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -868,6 +878,7 @@ async fn send_provider_auth_request(server: &MockServer, auth: ModelProviderAuth "unused-api-key", ))), conversation_id, + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), provider, SessionSource::Exec, config.model_verbosity, @@ -921,7 +932,7 @@ async fn includes_base_instructions_override_in_request() { let mut builder = test_codex() .with_auth(CodexAuth::from_api_key("Test API Key")) .with_config(|config| { - config.base_instructions = Some("test instructions".to_string()); + config.base_instructions = Some(Some("test instructions".to_string())); }); let codex = builder .build(&server) @@ -953,6 +964,47 @@ async fn includes_base_instructions_override_in_request() { ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn omits_explicit_null_base_instructions_from_request() { + skip_if_no_network!(); + // Mock server + let server = MockServer::start().await; + let resp_mock = mount_sse_once( + &server, + sse(vec![ev_response_created("resp1"), ev_completed("resp1")]), + ) + .await; + + let mut builder = test_codex() + .with_auth(CodexAuth::from_api_key("Test API Key")) + .with_config(|config| { + config.base_instructions = Some(None); + }); + let codex = builder + .build(&server) + .await + .expect("create new conversation") + .codex; + + codex + .submit(Op::UserInput { + items: vec![UserInput::Text { + text: "hello".into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + }) + .await + .unwrap(); + + wait_for_event(&codex, 
|ev| matches!(ev, EventMsg::TurnComplete(_))).await; + + let request = resp_mock.single_request(); + let request_body = request.body_json(); + + assert_eq!(request_body.get("instructions"), None); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn chatgpt_auth_sends_correct_request() { skip_if_no_network!(); @@ -1007,11 +1059,18 @@ async fn chatgpt_auth_sends_correct_request() { let request_body = request.body_json(); let session_id = request.header("session_id").expect("session_id header"); + let installation_id = + std::fs::read_to_string(test.codex_home_path().join(INSTALLATION_ID_FILENAME)) + .expect("read installation id"); assert_eq!(session_id, thread_id.to_string()); assert_eq!(request_originator, originator().value); assert_eq!(request_authorization, "Bearer Access Token"); assert_eq!(request_chatgpt_account_id, "account_id"); + assert_eq!( + request_body["client_metadata"]["x-codex-installation-id"].as_str(), + Some(installation_id.as_str()) + ); assert!(request_body["stream"].as_bool().unwrap()); assert_eq!( request_body["include"][0].as_str().unwrap(), @@ -2138,6 +2197,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() { let client = ModelClient::new( /*auth_manager*/ None, conversation_id, + /*installation_id*/ "11111111-1111-4111-8111-111111111111".to_string(), provider.clone(), SessionSource::Exec, config.model_verbosity, diff --git a/codex-rs/core/tests/suite/client_websockets.rs b/codex-rs/core/tests/suite/client_websockets.rs index d348d4a022..7ff4a9f88d 100755 --- a/codex-rs/core/tests/suite/client_websockets.rs +++ b/codex-rs/core/tests/suite/client_websockets.rs @@ -10,11 +10,11 @@ use codex_features::Feature; use codex_login::CodexAuth; use codex_model_provider_info::ModelProviderInfo; use codex_model_provider_info::WireApi; +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; use codex_otel::SessionTelemetry; use codex_otel::TelemetryAuthMode; use 
codex_otel::current_span_w3c_trace_context; -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; use codex_protocol::ThreadId; use codex_protocol::account::PlanType; use codex_protocol::config_types::ReasoningSummary; @@ -55,6 +55,7 @@ const MODEL: &str = "gpt-5.2-codex"; const OPENAI_BETA_HEADER: &str = "OpenAI-Beta"; const WS_V2_BETA_HEADER_VALUE: &str = "responses_websockets=2026-02-06"; const X_CLIENT_REQUEST_ID_HEADER: &str = "x-client-request-id"; +const TEST_INSTALLATION_ID: &str = "11111111-1111-4111-8111-111111111111"; fn assert_request_trace_matches(body: &serde_json::Value, expected_trace: &W3cTraceContext) { let client_metadata = body["client_metadata"] @@ -125,6 +126,10 @@ async fn responses_websocket_streams_request() { handshake.header(X_CLIENT_REQUEST_ID_HEADER), Some(harness.conversation_id.to_string()) ); + assert_eq!( + body["client_metadata"]["x-codex-installation-id"].as_str(), + Some(TEST_INSTALLATION_ID) + ); server.shutdown().await; } @@ -1654,9 +1659,9 @@ fn prompt_with_input(input: Vec) -> Prompt { fn prompt_with_input_and_instructions(input: Vec, instructions: &str) -> Prompt { let mut prompt = prompt_with_input(input); - prompt.base_instructions = BaseInstructions { + prompt.base_instructions = Some(BaseInstructions { text: instructions.to_string(), - }; + }); prompt } @@ -1756,6 +1761,7 @@ async fn websocket_harness_with_provider_options( let client = ModelClient::new( /*auth_manager*/ None, conversation_id, + /*installation_id*/ TEST_INSTALLATION_ID.to_string(), provider.clone(), SessionSource::Exec, config.model_verbosity, diff --git a/codex-rs/core/tests/suite/code_mode.rs b/codex-rs/core/tests/suite/code_mode.rs index 37ee27dd68..9ddc169a60 100644 --- a/codex-rs/core/tests/suite/code_mode.rs +++ b/codex-rs/core/tests/suite/code_mode.rs @@ -1626,6 +1626,34 @@ text({ json: true }); Ok(()) } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn code_mode_can_resume_after_set_timeout() -> 
Result<()> { + skip_if_no_network!(Ok(())); + + let server = responses::start_mock_server().await; + let (_test, second_mock) = run_code_mode_turn( + &server, + "use exec to wait for a timeout", + r#" +await new Promise((resolve) => setTimeout(resolve, 10)); +text("timer done"); +"#, + /*include_apply_patch*/ false, + ) + .await?; + + let req = second_mock.single_request(); + let (output, success) = custom_tool_output_body_and_success(&req, "call-1"); + assert_ne!( + success, + Some(false), + "exec setTimeout call failed unexpectedly: {output}" + ); + assert_eq!(output, "timer done"); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn code_mode_notify_injects_additional_exec_tool_output_into_active_context() -> Result<()> { skip_if_no_network!(Ok(())); @@ -2099,6 +2127,7 @@ text(JSON.stringify(Object.getOwnPropertyNames(globalThis).sort())); "BigInt64Array", "BigUint64Array", "Boolean", + "clearTimeout", "DataView", "Date", "DisposableStack", @@ -2161,6 +2190,7 @@ text(JSON.stringify(Object.getOwnPropertyNames(globalThis).sort())); "notify", "parseFloat", "parseInt", + "setTimeout", "store", "text", "tools", @@ -2578,3 +2608,51 @@ text(JSON.stringify(load("nb"))); Ok(()) } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn code_mode_can_compare_elapsed_time_around_set_timeout() -> Result<()> { + skip_if_no_network!(Ok(())); + + let server = responses::start_mock_server().await; + let (_test, second_mock) = run_code_mode_turn( + &server, + "measure elapsed time around setTimeout", + r#" +const start_ms = Date.now(); +await new Promise((resolve) => setTimeout(resolve, 100)); +const end_ms = Date.now(); +text(JSON.stringify({ + start_ms, + end_ms, + elapsed_ms: end_ms - start_ms, + waited_long_enough: end_ms - start_ms >= 100, +})); +"#, + /*include_apply_patch*/ false, + ) + .await?; + + let second_request = second_mock.single_request(); + let (second_output, second_success) = + 
custom_tool_output_body_and_success(&second_request, "call-1"); + assert_ne!( + second_success, + Some(false), + "exec compare time call failed unexpectedly: {second_output}" + ); + let compared: Value = serde_json::from_str( + &custom_tool_output_last_non_empty_text(&second_request, "call-1") + .expect("exec compare time call should emit JSON"), + )?; + let elapsed_ms = compared + .get("elapsed_ms") + .and_then(Value::as_i64) + .expect("elapsed_ms should be an integer"); + assert!( + elapsed_ms >= 100, + "expected elapsed_ms >= 100, got {elapsed_ms}" + ); + assert_eq!(compared.get("waited_long_enough"), Some(&Value::Bool(true))); + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/compact_remote.rs b/codex-rs/core/tests/suite/compact_remote.rs index 0069c5fb86..dd99e0f391 100644 --- a/codex-rs/core/tests/suite/compact_remote.rs +++ b/codex-rs/core/tests/suite/compact_remote.rs @@ -869,7 +869,7 @@ async fn remote_compact_trim_estimate_uses_session_base_instructions() -> Result let override_base_instructions = override_base_instructions.clone(); move |config| { config.model_context_window = Some(override_context_window); - config.base_instructions = Some(override_base_instructions); + config.base_instructions = Some(Some(override_base_instructions)); } }), ) diff --git a/codex-rs/core/tests/suite/compact_resume_fork.rs b/codex-rs/core/tests/suite/compact_resume_fork.rs index d47ed7c94c..8f4f2ae755 100644 --- a/codex-rs/core/tests/suite/compact_resume_fork.rs +++ b/codex-rs/core/tests/suite/compact_resume_fork.rs @@ -567,7 +567,7 @@ async fn snapshot_rollback_followup_turn_trims_context_updates() -> Result<()> { user_turn(&conversation, TURN_ONE_USER).await; - let override_cwd = config.cwd.join(PRETURN_CONTEXT_DIFF_CWD)?; + let override_cwd = config.cwd.join(PRETURN_CONTEXT_DIFF_CWD); std::fs::create_dir_all(&override_cwd)?; conversation .submit(Op::OverrideTurnContext { diff --git a/codex-rs/core/tests/suite/hierarchical_agents.rs 
b/codex-rs/core/tests/suite/hierarchical_agents.rs index 8a3fadfbb0..f0960c0724 100644 --- a/codex-rs/core/tests/suite/hierarchical_agents.rs +++ b/codex-rs/core/tests/suite/hierarchical_agents.rs @@ -18,21 +18,22 @@ async fn hierarchical_agents_appends_to_project_doc_in_user_instructions() { ) .await; - let mut builder = test_codex().with_config(|config| { - config - .features - .enable(Feature::ChildAgentsMd) - .expect("test config should allow feature update"); - std::fs::write( + let mut builder = test_codex() + .with_config(|config| { config - .cwd - .join("AGENTS.md") - .expect("absolute AGENTS.md path"), - "be nice", - ) - .expect("write AGENTS.md"); - }); - let test = builder.build(&server).await.expect("build test codex"); + .features + .enable(Feature::ChildAgentsMd) + .expect("test config should allow feature update"); + }) + .with_workspace_setup(|cwd, fs| async move { + let agents_md = cwd.join("AGENTS.md"); + fs.write_file(&agents_md, b"be nice".to_vec()).await?; + Ok::<(), anyhow::Error>(()) + }); + let test = builder + .build_remote_aware(&server) + .await + .expect("build test codex"); test.submit_turn("hello").await.expect("submit turn"); @@ -73,7 +74,10 @@ async fn hierarchical_agents_emits_when_no_project_doc() { .enable(Feature::ChildAgentsMd) .expect("test config should allow feature update"); }); - let test = builder.build(&server).await.expect("build test codex"); + let test = builder + .build_remote_aware(&server) + .await + .expect("build test codex"); test.submit_turn("hello").await.expect("submit turn"); diff --git a/codex-rs/core/tests/suite/live_reload.rs b/codex-rs/core/tests/suite/live_reload.rs index 663cf47488..6ab001383f 100644 --- a/codex-rs/core/tests/suite/live_reload.rs +++ b/codex-rs/core/tests/suite/live_reload.rs @@ -6,7 +6,7 @@ use std::path::PathBuf; use std::time::Duration; use anyhow::Result; -use codex_core::config::ProjectConfig; +use codex_config::config_toml::ProjectConfig; use 
codex_protocol::config_types::TrustLevel; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::EventMsg; diff --git a/codex-rs/core/tests/suite/mod.rs b/codex-rs/core/tests/suite/mod.rs index 4d58537e02..78dcf748a3 100644 --- a/codex-rs/core/tests/suite/mod.rs +++ b/codex-rs/core/tests/suite/mod.rs @@ -74,6 +74,7 @@ pub static CODEX_ALIASES_TEMP_DIR: Option = { mod abort_tasks; mod agent_jobs; mod agent_websocket; +mod agents_md; mod apply_patch_cli; #[cfg(not(target_os = "windows"))] mod approvals; diff --git a/codex-rs/core/tests/suite/pending_input.rs b/codex-rs/core/tests/suite/pending_input.rs index 2fc5bf4e34..25a08120ae 100644 --- a/codex-rs/core/tests/suite/pending_input.rs +++ b/codex-rs/core/tests/suite/pending_input.rs @@ -101,6 +101,19 @@ async fn submit_user_input(codex: &CodexThread, text: &str) { .unwrap_or_else(|err| panic!("submit user input: {err}")); } +async fn steer_user_input(codex: &CodexThread, text: &str) { + codex + .steer_input( + vec![UserInput::Text { + text: text.to_string(), + text_elements: Vec::new(), + }], + /*expected_turn_id*/ None, + ) + .await + .unwrap_or_else(|err| panic!("steer user input: {err:?}")); +} + async fn submit_queue_only_agent_mail(codex: &CodexThread, text: &str) { codex .submit(Op::InterAgentCommunication { @@ -410,7 +423,7 @@ async fn user_input_does_not_preempt_after_reasoning_item() { wait_for_reasoning_item_started(&codex).await; - submit_user_input(&codex, "second prompt").await; + steer_user_input(&codex, "second prompt").await; let _ = gate_reasoning_done_tx.send(()); diff --git a/codex-rs/core/tests/suite/personality.rs b/codex-rs/core/tests/suite/personality.rs index 172738ffd1..8a0cbdcb63 100644 --- a/codex-rs/core/tests/suite/personality.rs +++ b/codex-rs/core/tests/suite/personality.rs @@ -66,7 +66,7 @@ async fn base_instructions_override_disables_personality_template() { .enable(Feature::Personality) .expect("test config should allow feature update"); config.personality = 
Some(Personality::Friendly); - config.base_instructions = Some("override instructions".to_string()); + config.base_instructions = Some(Some("override instructions".to_string())); let model_info = codex_core::test_support::construct_model_info_offline("gpt-5.2-codex", &config); diff --git a/codex-rs/core/tests/suite/personality_migration.rs b/codex-rs/core/tests/suite/personality_migration.rs index 0a8dd61d9c..3ce6925f41 100644 --- a/codex-rs/core/tests/suite/personality_migration.rs +++ b/codex-rs/core/tests/suite/personality_migration.rs @@ -1,6 +1,6 @@ +use codex_config::config_toml::ConfigToml; use codex_core::ARCHIVED_SESSIONS_SUBDIR; use codex_core::SESSIONS_SUBDIR; -use codex_core::config::ConfigToml; use codex_core::personality_migration::PERSONALITY_MIGRATION_FILENAME; use codex_core::personality_migration::PersonalityMigrationStatus; use codex_core::personality_migration::maybe_migrate_personality; @@ -71,6 +71,7 @@ async fn write_rollout_with_user_event(dir: &Path, thread_id: ThreadId) -> io::R agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, @@ -116,6 +117,7 @@ async fn write_rollout_with_meta_only(dir: &Path, thread_id: ThreadId) -> io::Re agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, diff --git a/codex-rs/core/tests/suite/remote_env.rs b/codex-rs/core/tests/suite/remote_env.rs index 0dd7718d3a..4cd9568a58 100644 --- a/codex-rs/core/tests/suite/remote_env.rs +++ b/codex-rs/core/tests/suite/remote_env.rs @@ -1,6 +1,6 @@ use anyhow::Result; use codex_exec_server::RemoveOptions; -use codex_utils_absolute_path::AbsolutePathBuf; +use core_test_support::PathBufExt; use core_test_support::get_remote_test_env; use core_test_support::test_codex::test_env; use pretty_assertions::assert_eq; @@ -17,8 +17,7 @@ async fn remote_test_env_can_connect_and_use_filesystem() -> Result<()> { 
let test_env = test_env().await?; let file_system = test_env.environment().get_filesystem(); - let file_path = remote_test_file_path(); - let file_path_abs = absolute_path(file_path.clone())?; + let file_path_abs = remote_test_file_path().abs(); let payload = b"remote-test-env-ok".to_vec(); file_system @@ -39,12 +38,6 @@ async fn remote_test_env_can_connect_and_use_filesystem() -> Result<()> { Ok(()) } - -fn absolute_path(path: PathBuf) -> Result { - AbsolutePathBuf::try_from(path.clone()) - .map_err(|err| anyhow::anyhow!("invalid absolute path {}: {err}", path.display())) -} - fn remote_test_file_path() -> PathBuf { let nanos = match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(duration) => duration.as_nanos(), diff --git a/codex-rs/core/tests/suite/resume_warning.rs b/codex-rs/core/tests/suite/resume_warning.rs index 0252a99271..f9ad64c771 100644 --- a/codex-rs/core/tests/suite/resume_warning.rs +++ b/codex-rs/core/tests/suite/resume_warning.rs @@ -53,6 +53,7 @@ fn resume_history( history: vec![ RolloutItem::EventMsg(EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn_id.clone(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, })), @@ -66,6 +67,8 @@ fn resume_history( RolloutItem::EventMsg(EventMsg::TurnComplete(TurnCompleteEvent { turn_id, last_agent_message: None, + completed_at: None, + duration_ms: None, })), ], rollout_path: rollout_path.to_path_buf(), diff --git a/codex-rs/core/tests/suite/rollout_list_find.rs b/codex-rs/core/tests/suite/rollout_list_find.rs index 7445a4ba05..1954654266 100644 --- a/codex-rs/core/tests/suite/rollout_list_find.rs +++ b/codex-rs/core/tests/suite/rollout_list_find.rs @@ -173,7 +173,8 @@ async fn find_locates_rollout_file_written_by_recorder() -> std::io::Result<()> thread_id, /*forked_from_id*/ None, SessionSource::Exec, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), diff 
--git a/codex-rs/core/tests/suite/sqlite_state.rs b/codex-rs/core/tests/suite/sqlite_state.rs index f35152e185..d99c3865c7 100644 --- a/codex-rs/core/tests/suite/sqlite_state.rs +++ b/codex-rs/core/tests/suite/sqlite_state.rs @@ -146,6 +146,7 @@ async fn backfill_scans_existing_rollouts() -> Result<()> { agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: Some(dynamic_tools_for_hook), memory_mode: None, }, diff --git a/codex-rs/core/tests/suite/stream_error_allows_next_turn.rs b/codex-rs/core/tests/suite/stream_error_allows_next_turn.rs index 19d9d27cf7..c5aa57ce74 100644 --- a/codex-rs/core/tests/suite/stream_error_allows_next_turn.rs +++ b/codex-rs/core/tests/suite/stream_error_allows_next_turn.rs @@ -84,7 +84,7 @@ async fn continue_after_stream_error() { let TestCodex { codex, .. } = test_codex() .with_config(move |config| { - config.base_instructions = Some("You are a helpful assistant".to_string()); + config.base_instructions = Some(Some("You are a helpful assistant".to_string())); config.model_provider = provider; }) .build(&server) diff --git a/codex-rs/core/tests/suite/unified_exec.rs b/codex-rs/core/tests/suite/unified_exec.rs index 2f72277086..7470717a04 100644 --- a/codex-rs/core/tests/suite/unified_exec.rs +++ b/codex-rs/core/tests/suite/unified_exec.rs @@ -1,9 +1,11 @@ use std::collections::HashMap; use std::ffi::OsStr; use std::fs; +use std::sync::OnceLock; use anyhow::Context; use anyhow::Result; +use codex_exec_server::CreateDirectoryOptions; use codex_features::Feature; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::EventMsg; @@ -32,10 +34,10 @@ use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; use core_test_support::wait_for_event_with_timeout; use pretty_assertions::assert_eq; +use regex_lite::Regex; use serde_json::Value; use serde_json::json; use tokio::time::Duration; -use which::which; fn extract_output_text(item: &Value) 
-> Option<&str> { item.get("output").and_then(|value| match value { @@ -57,49 +59,65 @@ struct ParsedUnifiedExecOutput { #[allow(clippy::expect_used)] fn parse_unified_exec_output(raw: &str) -> Result { - let cleaned = raw.replace("\r\n", "\n"); - let (metadata, output) = cleaned - .rsplit_once("\nOutput:") + static OUTPUT_REGEX: OnceLock = OnceLock::new(); + let regex = OUTPUT_REGEX.get_or_init(|| { + Regex::new(concat!( + r#"(?s)^(?:Total output lines: \d+\n\n)?"#, + r#"(?:Chunk ID: (?P[^\n]+)\n)?"#, + r#"Wall time: (?P-?\d+(?:\.\d+)?) seconds\n"#, + r#"(?:Process exited with code (?P-?\d+)\n)?"#, + r#"(?:Process running with session ID (?P-?\d+)\n)?"#, + r#"(?:Original token count: (?P\d+)\n)?"#, + r#"Output:\n?(?P.*)$"#, + )) + .expect("valid unified exec output regex") + }); + + let cleaned = raw.trim_matches('\r'); + let captures = regex + .captures(cleaned) .ok_or_else(|| anyhow::anyhow!("missing Output section in unified exec output {raw}"))?; - let output = output.strip_prefix('\n').unwrap_or(output); - let mut chunk_id = None; - let mut wall_time_seconds = None; - let mut process_id = None; - let mut exit_code = None; - let mut original_token_count = None; + let chunk_id = captures + .name("chunk_id") + .map(|value| value.as_str().to_string()); - for line in metadata.lines() { - if let Some(value) = line.strip_prefix("Chunk ID: ") { - chunk_id = Some(value.to_string()); - } else if let Some(value) = line.strip_prefix("Wall time: ") { - let value = value.strip_suffix(" seconds").ok_or_else(|| { - anyhow::anyhow!("invalid wall time line in unified exec output: {line}") - })?; - wall_time_seconds = Some( - value - .parse::() - .context("failed to parse wall time seconds")?, - ); - } else if let Some(value) = line.strip_prefix("Process exited with code ") { - exit_code = Some( - value - .parse::() - .context("failed to parse exit code from unified exec output")?, - ); - } else if let Some(value) = line.strip_prefix("Process running with session ID ") { - 
process_id = Some(value.to_string()); - } else if let Some(value) = line.strip_prefix("Original token count: ") { - original_token_count = Some( - value - .parse::() - .context("failed to parse original token count from unified exec output")?, - ); - } - } + let wall_time_seconds = captures + .name("wall_time") + .expect("wall_time group present") + .as_str() + .parse::() + .context("failed to parse wall time seconds")?; - let wall_time_seconds = wall_time_seconds - .ok_or_else(|| anyhow::anyhow!("missing wall time in unified exec output {raw}"))?; + let exit_code = captures + .name("exit_code") + .map(|value| { + value + .as_str() + .parse::() + .context("failed to parse exit code from unified exec output") + }) + .transpose()?; + + let process_id = captures + .name("process_id") + .map(|value| value.as_str().to_string()); + + let original_token_count = captures + .name("original_token_count") + .map(|value| { + value + .as_str() + .parse::() + .context("failed to parse original token count from unified exec output") + }) + .transpose()?; + + let output = captures + .name("output") + .expect("output group present") + .as_str() + .to_string(); Ok(ParsedUnifiedExecOutput { chunk_id, @@ -107,7 +125,7 @@ fn parse_unified_exec_output(raw: &str) -> Result { process_id, exit_code, original_token_count, - output: output.to_string(), + output, }) } @@ -137,6 +155,47 @@ fn collect_tool_outputs(bodies: &[Value]) -> Result Result<()> { + let session_model = test.session_configured.model.clone(); + + test.codex + .submit(Op::UserTurn { + items: vec![UserInput::Text { + text: prompt.into(), + text_elements: Vec::new(), + }], + final_output_json_schema: None, + cwd: test.config.cwd.to_path_buf(), + approval_policy: AskForApproval::Never, + approvals_reviewer: None, + sandbox_policy, + model: session_model, + effort: None, + summary: None, + service_tier: None, + collaboration_mode: None, + personality: None, + }) + .await?; + + Ok(()) +} + +async fn create_workspace_directory( + 
test: &TestCodex, + rel_path: impl AsRef, +) -> Result { + let abs_path = test.config.cwd.join(rel_path.as_ref()); + test.fs() + .create_directory(&abs_path, CreateDirectoryOptions { recursive: true }) + .await?; + Ok(abs_path.into_path_buf()) +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> { skip_if_no_network!(Ok(())); @@ -287,12 +346,8 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; + let cwd = test.config.cwd.to_path_buf(); let call_id = "uexec-begin-event"; let args = json!({ @@ -315,29 +370,9 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "emit begin event", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "emit begin event".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - let begin_event = wait_for_event_match(&codex, |msg| match msg { + let begin_event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) @@ -345,9 +380,12 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> { assert_command(&begin_event.command, "-lc", "/bin/echo hello 
unified exec"); - assert_eq!(begin_event.cwd, cwd.path()); + assert_eq!(begin_event.cwd, cwd); - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; Ok(()) } @@ -367,15 +405,10 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let workdir_rel = std::path::PathBuf::from("uexec_relative_workdir"); - std::fs::create_dir_all(cwd.path().join(&workdir_rel))?; + let workdir = create_workspace_directory(&test, &workdir_rel).await?; let call_id = "uexec-workdir-relative"; let args = json!({ @@ -398,41 +431,28 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "run relative workdir test", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "run relative workdir test".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - let begin_event = wait_for_event_match(&codex, |msg| match msg { + let begin_event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) .await; assert_eq!( - begin_event.cwd, - cwd.path().join(workdir_rel), + begin_event.cwd, 
workdir, "exec_command cwd should resolve relative workdir against turn cwd", ); - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; Ok(()) } @@ -453,15 +473,9 @@ async fn unified_exec_respects_workdir_override() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; - let workdir = cwd.path().join("uexec_workdir_test"); - std::fs::create_dir_all(&workdir)?; + let workdir = create_workspace_directory(&test, "uexec_workdir_test").await?; let call_id = "uexec-workdir"; let args = json!({ @@ -484,29 +498,9 @@ async fn unified_exec_respects_workdir_override() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "run workdir test", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "run workdir test".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - let begin_event = wait_for_event_match(&codex, |msg| match msg { + let begin_event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandBegin(event) if event.call_id == call_id => Some(event.clone()), _ => None, }) @@ -517,7 +511,10 @@ async fn unified_exec_respects_workdir_override() -> Result<()> { "exec_command cwd should reflect the requested workdir override" ); - 
wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -540,12 +537,7 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-end-event"; let args = json!({ @@ -582,29 +574,9 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "emit end event", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "emit end event".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - let end_event = wait_for_event_match(&codex, |msg| match msg { + let end_event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) @@ -616,7 +588,10 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> { "expected aggregated output to contain marker" ); - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; Ok(()) } @@ -635,12 
+610,7 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-delta-1"; let args = json!({ @@ -662,29 +632,9 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "emit delta", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "emit delta".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - let event = wait_for_event_match(&codex, |msg| match msg { + let event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => Some(ev.clone()), _ => None, }) @@ -696,7 +646,10 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> { "delta chunk missing expected text: {text:?}", ); - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; Ok(()) } @@ -715,12 +668,7 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-full-lifecycle"; // This timing force the long-standing PTY @@ -743,34 +691,19 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "exercise full unified exec lifecycle".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn( + &test, + "exercise full unified exec lifecycle", + SandboxPolicy::DangerFullAccess, + ) + .await?; let mut begin_event = None; let mut end_event = None; let mut task_completed = false; loop { - let msg = wait_for_event(&codex, |_| true).await; + let msg = wait_for_event(&test.codex, |_| true).await; match msg { EventMsg::ExecCommandBegin(ev) if ev.call_id == call_id => begin_event = Some(ev), EventMsg::ExecCommandEnd(ev) if ev.call_id == call_id => { @@ -830,12 +763,7 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()> .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let open_call_id = "uexec-open"; let open_args = json!({ @@ -878,32 +806,12 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()> ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "stdin delta".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn(&test, "stdin delta", SandboxPolicy::DangerFullAccess).await?; let mut terminal_interaction = None; loop { - let msg = wait_for_event(&codex, |_| true).await; + let msg = wait_for_event(&test.codex, |_| true).await; match msg { EventMsg::TerminalInteraction(ev) if ev.call_id == open_call_id => { terminal_interaction = Some(ev); @@ -938,12 +846,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<( .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let open_call_id = "uexec-delayed-open"; let open_args = json!({ @@ -1020,27 +923,12 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<( ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "delayed terminal interaction output".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn( + &test, + "delayed terminal interaction output", + SandboxPolicy::DangerFullAccess, + ) + .await?; let mut begin_event = None; let mut end_event = None; @@ -1050,7 +938,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<( // Consume all events for this turn so we can assert on each stage. loop { - let msg = wait_for_event(&codex, |_| true).await; + let msg = wait_for_event(&test.codex, |_| true).await; match msg { EventMsg::ExecCommandBegin(ev) if ev.call_id == open_call_id => { begin_event = Some(ev); @@ -1137,12 +1025,7 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let open_call_id = "uexec-open-session"; let open_args = json!({ @@ -1185,33 +1068,18 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> { ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "check poll event behavior".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn( + &test, + "check poll event behavior", + SandboxPolicy::DangerFullAccess, + ) + .await?; let mut begin_events = Vec::new(); let mut end_events = Vec::new(); let mut task_completed = false; loop { - let event_msg = wait_for_event(&codex, |_| true).await; + let event_msg = wait_for_event(&test.codex, |_| true).await; match event_msg { EventMsg::ExecCommandBegin(event) if event.call_id == open_call_id => { begin_events.push(event); @@ -1271,12 +1139,7 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-metadata"; let args = serde_json::json!({ @@ -1298,29 +1161,12 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "run metadata test", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "run metadata test".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -1378,14 +1224,6 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> { skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); - let python = match which("python").or_else(|_| which("python3")) { - Ok(path) => path, - Err(_) => { - eprintln!("python not found in PATH, skipping tty default test."); - return Ok(()); - } - }; - let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { @@ -1394,16 +1232,11 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-default-pipe"; let args = serde_json::json!({ - "cmd": format!("{} -c \"import sys; print(sys.stdin.isatty())\"", python.display()), + "cmd": "python3 -c \"import sys; print(sys.stdin.isatty())\"", "yield_time_ms": 1500, }); @@ -1421,29 +1254,17 @@ async fn unified_exec_defaults_to_pipe() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "check default pipe mode", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "check default pipe mode".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -1472,14 +1293,6 @@ async fn unified_exec_can_enable_tty() -> Result<()> { skip_if_sandbox!(Ok(())); skip_if_windows!(Ok(())); - let python = match which("python").or_else(|_| which("python3")) { - Ok(path) => path, - Err(_) => { - eprintln!("python not found in PATH, skipping tty enable test."); - return Ok(()); - } - }; - let server = start_mock_server().await; let mut builder = test_codex().with_config(|config| { @@ -1488,16 +1301,11 @@ async fn unified_exec_can_enable_tty() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow 
feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-tty-enabled"; let args = serde_json::json!({ - "cmd": format!("{} -c \"import sys; print(sys.stdin.isatty())\"", python.display()), + "cmd": "python3 -c \"import sys; print(sys.stdin.isatty())\"", "yield_time_ms": 1500, "tty": true, }); @@ -1516,29 +1324,12 @@ async fn unified_exec_can_enable_tty() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "check tty enabled", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "check tty enabled".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -1576,12 +1367,7 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec-early-exit"; let args = serde_json::json!({ @@ -1602,29 +1388,17 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "watch early exit timing", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "watch early exit timing".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -1676,12 +1450,7 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let start_call_id = "uexec-cat-start"; let send_call_id = "uexec-cat-send"; @@ -1738,29 +1507,17 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "test write_stdin exit behavior", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "test write_stdin exit behavior".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -1847,12 +1604,7 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<() .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let start_call_id = "uexec-end-on-exit-start"; let start_args = serde_json::json!({ @@ -1911,30 +1663,10 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<() ]; mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "end on exit".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn(&test, "end on exit", SandboxPolicy::DangerFullAccess).await?; // We expect the ExecCommandEnd event to match the initial exec_command call_id. - let end_event = wait_for_event_match(&codex, |msg| match msg { + let end_event = wait_for_event_match(&test.codex, |msg| match msg { EventMsg::ExecCommandEnd(ev) if ev.call_id == start_call_id => Some(ev.clone()), _ => None, }) @@ -1942,7 +1674,10 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<() assert_eq!(end_event.exit_code, 0); - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; Ok(()) } @@ -2148,12 +1883,7 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let first_call_id = "uexec-start"; let first_args = serde_json::json!({ @@ -2195,29 +1925,12 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "run unified exec", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "run unified exec".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -2269,12 +1982,7 @@ async fn unified_exec_streams_after_lagged_output() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let script = r#"python3 - <<'PY' import sys @@ -2335,30 +2043,15 @@ PY ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "exercise lag handling".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn( + &test, + "exercise lag handling", + SandboxPolicy::DangerFullAccess, + ) + .await?; // This is a worst case scenario for the truncate logic. wait_for_event_with_timeout( - &codex, + &test.codex, |event| matches!(event, EventMsg::TurnComplete(_)), Duration::from_secs(10), ) @@ -2408,12 +2101,7 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let first_call_id = "uexec-timeout"; let first_args = serde_json::json!({ @@ -2454,30 +2142,10 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); - - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "check timeout".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; + submit_unified_exec_turn(&test, "check timeout", SandboxPolicy::DangerFullAccess).await?; loop { - let event = codex.next_event().await.expect("event"); + let event = test.codex.next_event().await.expect("event"); if matches!(event.msg, EventMsg::TurnComplete(_)) { break; } @@ -2522,12 +2190,7 @@ async fn unified_exec_formats_large_output_summary() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. 
- } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let script = r#"python3 - <<'PY' import sys @@ -2555,29 +2218,17 @@ PY ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "summarize large output", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "summarize large output".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at least one POST request"); @@ -2590,22 +2241,8 @@ PY let large_output = outputs.get(call_id).expect("missing large output summary"); let output_text = large_output.output.replace("\r\n", "\n"); - assert!( - output_text.starts_with("Total output lines: "), - "expected large output summary header, got {output_text:?}" - ); - assert!( - output_text.contains("…") && output_text.contains("tokens truncated"), - "expected truncation marker in large output summary, got {output_text:?}" - ); - assert!( - output_text.contains("token token \ntoken token \ntoken token \n"), - "expected preserved output prefix in large output summary, got {output_text:?}" - ); - assert!( - output_text.ends_with("token token ") || output_text.ends_with("token token \n"), - "expected preserved output suffix in large output summary, got {output_text:?}" - ); + let 
truncated_pattern = r"(?s)^Total output lines: \d+\n\n(token token \n){5,}.*…\d+ tokens truncated….*(token token \n){5,}$"; + assert_regex_match(truncated_pattern, &output_text); let original_tokens = large_output .original_token_count @@ -2690,7 +2327,7 @@ async fn unified_exec_runs_under_sandbox() -> Result<()> { let outputs = collect_tool_outputs(&bodies)?; let output = outputs.get(call_id).expect("missing output"); - assert_eq!(output.output.trim_end_matches(['\r', '\n']), "hello"); + assert_regex_match("hello[\r\n]+", &output.output); Ok(()) } @@ -2840,12 +2477,7 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; let call_id = "uexec"; let args = serde_json::json!({ @@ -2865,29 +2497,17 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> { ]; let request_log = mount_sse_sequence(&server, responses).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn( + &test, + "summarize large output", + SandboxPolicy::DangerFullAccess, + ) + .await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "summarize large output".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = request_log.requests(); assert!(!requests.is_empty(), "expected at 
least one POST request"); @@ -2921,12 +2541,7 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> { .enable(Feature::UnifiedExec) .expect("test config should allow feature update"); }); - let TestCodex { - codex, - cwd, - session_configured, - .. - } = builder.build(&server).await?; + let test = builder.build_remote_aware(&server).await?; const MAX_SESSIONS_FOR_TEST: i32 = 64; const FILLER_SESSIONS: i32 = MAX_SESSIONS_FOR_TEST - 1; @@ -3005,29 +2620,12 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> { let response_mock = mount_sse_sequence(&server, vec![first_response, completion_response]).await; - let session_model = session_configured.model.clone(); + submit_unified_exec_turn(&test, "fill session cache", SandboxPolicy::DangerFullAccess).await?; - codex - .submit(Op::UserTurn { - items: vec![UserInput::Text { - text: "fill session cache".into(), - text_elements: Vec::new(), - }], - final_output_json_schema: None, - cwd: cwd.path().to_path_buf(), - approval_policy: AskForApproval::Never, - approvals_reviewer: None, - sandbox_policy: SandboxPolicy::DangerFullAccess, - model: session_model, - effort: None, - summary: None, - service_tier: None, - collaboration_mode: None, - personality: None, - }) - .await?; - - wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await; + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TurnComplete(_)) + }) + .await; let requests = response_mock.requests(); assert!( diff --git a/codex-rs/core/tests/suite/view_image.rs b/codex-rs/core/tests/suite/view_image.rs index f16d3c2b18..b213e305e4 100644 --- a/codex-rs/core/tests/suite/view_image.rs +++ b/codex-rs/core/tests/suite/view_image.rs @@ -85,7 +85,7 @@ fn png_bytes(width: u32, height: u32, rgba: [u8; 4]) -> anyhow::Result> } async fn create_workspace_directory(test: &TestCodex, rel_path: &str) -> anyhow::Result { - let abs_path = test.config.cwd.join(rel_path)?; + let abs_path = 
test.config.cwd.join(rel_path); test.fs() .create_directory(&abs_path, CreateDirectoryOptions { recursive: true }) .await?; @@ -97,7 +97,7 @@ async fn write_workspace_file( rel_path: &str, contents: Vec, ) -> anyhow::Result { - let abs_path = test.config.cwd.join(rel_path)?; + let abs_path = test.config.cwd.join(rel_path); if let Some(parent) = abs_path.parent() { test.fs() .create_directory(&parent, CreateDirectoryOptions { recursive: true }) @@ -226,7 +226,7 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> { let cwd = config.cwd.clone(); let rel_path = "assets/example.png"; - let abs_path = cwd.join(rel_path)?; + let abs_path = cwd.join(rel_path); let original_width = 2304; let original_height = 864; write_workspace_png( @@ -1258,7 +1258,7 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> { } = &test; let rel_path = "missing/example.png"; - let abs_path = config.cwd.join(rel_path)?; + let abs_path = config.cwd.join(rel_path); let call_id = "view-image-missing"; let arguments = serde_json::json!({ "path": rel_path }).to_string(); diff --git a/codex-rs/exec-server/src/environment.rs b/codex-rs/exec-server/src/environment.rs index 1ad729075b..2385b186a2 100644 --- a/codex-rs/exec-server/src/environment.rs +++ b/codex-rs/exec-server/src/environment.rs @@ -14,44 +14,83 @@ use crate::remote_process::RemoteProcess; pub const CODEX_EXEC_SERVER_URL_ENV_VAR: &str = "CODEX_EXEC_SERVER_URL"; -pub trait ExecutorEnvironment: Send + Sync { - fn get_exec_backend(&self) -> Arc; -} - -#[derive(Debug, Default)] +/// Lazily creates and caches the active environment for a session. +/// +/// The manager keeps the session's environment selection stable so subagents +/// and follow-up turns preserve an explicit disabled state. 
+#[derive(Debug)] pub struct EnvironmentManager { exec_server_url: Option, - current_environment: OnceCell>, + disabled: bool, + current_environment: OnceCell>>, +} + +impl Default for EnvironmentManager { + fn default() -> Self { + Self::new(/*exec_server_url*/ None) + } } impl EnvironmentManager { + /// Builds a manager from the raw `CODEX_EXEC_SERVER_URL` value. pub fn new(exec_server_url: Option) -> Self { + let (exec_server_url, disabled) = normalize_exec_server_url(exec_server_url); Self { - exec_server_url: normalize_exec_server_url(exec_server_url), + exec_server_url, + disabled, current_environment: OnceCell::new(), } } + /// Builds a manager from process environment variables. pub fn from_env() -> Self { Self::new(std::env::var(CODEX_EXEC_SERVER_URL_ENV_VAR).ok()) } + /// Builds a manager from the currently selected environment, or from the + /// disabled mode when no environment is available. + pub fn from_environment(environment: Option<&Environment>) -> Self { + match environment { + Some(environment) => Self { + exec_server_url: environment.exec_server_url().map(str::to_owned), + disabled: false, + current_environment: OnceCell::new(), + }, + None => Self { + exec_server_url: None, + disabled: true, + current_environment: OnceCell::new(), + }, + } + } + + /// Returns the remote exec-server URL when one is configured. pub fn exec_server_url(&self) -> Option<&str> { self.exec_server_url.as_deref() } - pub async fn current(&self) -> Result, ExecServerError> { + /// Returns the cached environment, creating it on first access. 
+ pub async fn current(&self) -> Result>, ExecServerError> { self.current_environment .get_or_try_init(|| async { - Ok(Arc::new( - Environment::create(self.exec_server_url.clone()).await?, - )) + if self.disabled { + Ok(None) + } else { + Ok(Some(Arc::new( + Environment::create(self.exec_server_url.clone()).await?, + ))) + } }) .await - .map(Arc::clone) + .map(Option::as_ref) + .map(std::option::Option::<&Arc>::cloned) } } +/// Concrete execution/filesystem environment selected for a session. +/// +/// This bundles the selected backend together with the corresponding remote +/// client, if any. #[derive(Clone)] pub struct Environment { exec_server_url: Option, @@ -86,12 +125,19 @@ impl std::fmt::Debug for Environment { } impl Environment { + /// Builds an environment from the raw `CODEX_EXEC_SERVER_URL` value. pub async fn create(exec_server_url: Option) -> Result { - let exec_server_url = normalize_exec_server_url(exec_server_url); - let remote_exec_server_client = if let Some(url) = &exec_server_url { + let (exec_server_url, disabled) = normalize_exec_server_url(exec_server_url); + if disabled { + return Err(ExecServerError::Protocol( + "disabled mode does not create an Environment".to_string(), + )); + } + + let remote_exec_server_client = if let Some(exec_server_url) = &exec_server_url { Some( ExecServerClient::connect_websocket(RemoteExecServerConnectArgs { - websocket_url: url.clone(), + websocket_url: exec_server_url.clone(), client_name: "codex-environment".to_string(), connect_timeout: std::time::Duration::from_secs(5), initialize_timeout: std::time::Duration::from_secs(5), @@ -102,10 +148,14 @@ impl Environment { None }; - let exec_backend: Arc = - if let Some(client) = remote_exec_server_client.clone() { - Arc::new(RemoteProcess::new(client)) - } else { + let exec_backend: Arc = match remote_exec_server_client.clone() { + Some(client) => Arc::new(RemoteProcess::new(client)), + None if exec_server_url.is_some() => { + return Err(ExecServerError::Protocol( 
+ "remote mode should have an exec-server client".to_string(), + )); + } + None => { let local_process = LocalProcess::default(); local_process .initialize() @@ -114,7 +164,8 @@ impl Environment { .initialized() .map_err(ExecServerError::Protocol)?; Arc::new(local_process) - }; + } + }; Ok(Self { exec_server_url, @@ -123,6 +174,11 @@ impl Environment { }) } + pub fn is_remote(&self) -> bool { + self.exec_server_url.is_some() + } + + /// Returns the remote exec-server URL when this environment is remote. pub fn exec_server_url(&self) -> Option<&str> { self.exec_server_url.as_deref() } @@ -132,27 +188,20 @@ impl Environment { } pub fn get_filesystem(&self) -> Arc { - if let Some(client) = self.remote_exec_server_client.clone() { - Arc::new(RemoteFileSystem::new(client)) - } else { - Arc::new(LocalFileSystem) + match self.remote_exec_server_client.clone() { + Some(client) => Arc::new(RemoteFileSystem::new(client)), + None => Arc::new(LocalFileSystem), } } } -fn normalize_exec_server_url(exec_server_url: Option) -> Option { - exec_server_url.and_then(|url| { - let url = url.trim(); - (!url.is_empty()).then(|| url.to_string()) - }) -} - -impl ExecutorEnvironment for Environment { - fn get_exec_backend(&self) -> Arc { - Arc::clone(&self.exec_backend) +fn normalize_exec_server_url(exec_server_url: Option) -> (Option, bool) { + match exec_server_url.as_deref().map(str::trim) { + None | Some("") => (None, false), + Some(url) if url.eq_ignore_ascii_case("none") => (None, true), + Some(url) => (Some(url.to_string()), false), } } - #[cfg(test)] mod tests { use std::sync::Arc; @@ -164,7 +213,7 @@ mod tests { use pretty_assertions::assert_eq; #[tokio::test] - async fn create_without_remote_exec_server_url_does_not_connect() { + async fn create_local_environment_does_not_connect() { let environment = Environment::create(/*exec_server_url*/ None) .await .expect("create environment"); @@ -177,6 +226,15 @@ mod tests { fn environment_manager_normalizes_empty_url() { let manager = 
EnvironmentManager::new(Some(String::new())); + assert!(!manager.disabled); + assert_eq!(manager.exec_server_url(), None); + } + + #[test] + fn environment_manager_treats_none_value_as_disabled() { + let manager = EnvironmentManager::new(Some("none".to_string())); + + assert!(manager.disabled); assert_eq!(manager.exec_server_url(), None); } @@ -187,9 +245,25 @@ mod tests { let first = manager.current().await.expect("get current environment"); let second = manager.current().await.expect("get current environment"); + let first = first.expect("local environment"); + let second = second.expect("local environment"); + assert!(Arc::ptr_eq(&first, &second)); } + #[tokio::test] + async fn disabled_environment_manager_has_no_current_environment() { + let manager = EnvironmentManager::new(Some("none".to_string())); + + assert!( + manager + .current() + .await + .expect("get current environment") + .is_none() + ); + } + #[tokio::test] async fn default_environment_has_ready_local_executor() { let environment = Environment::default(); diff --git a/codex-rs/exec-server/src/file_system.rs b/codex-rs/exec-server/src/file_system.rs index 35c2243f8e..b04b480893 100644 --- a/codex-rs/exec-server/src/file_system.rs +++ b/codex-rs/exec-server/src/file_system.rs @@ -39,6 +39,12 @@ pub type FileSystemResult = io::Result; pub trait ExecutorFileSystem: Send + Sync { async fn read_file(&self, path: &AbsolutePathBuf) -> FileSystemResult>; + /// Reads a file and decodes it as UTF-8 text. 
+ async fn read_file_text(&self, path: &AbsolutePathBuf) -> FileSystemResult { + let bytes = self.read_file(path).await?; + String::from_utf8(bytes).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } + async fn write_file(&self, path: &AbsolutePathBuf, contents: Vec) -> FileSystemResult<()>; async fn create_directory( diff --git a/codex-rs/exec-server/src/lib.rs b/codex-rs/exec-server/src/lib.rs index 77834c6143..88378da9ee 100644 --- a/codex-rs/exec-server/src/lib.rs +++ b/codex-rs/exec-server/src/lib.rs @@ -34,7 +34,6 @@ pub use codex_app_server_protocol::FsWriteFileResponse; pub use environment::CODEX_EXEC_SERVER_URL_ENV_VAR; pub use environment::Environment; pub use environment::EnvironmentManager; -pub use environment::ExecutorEnvironment; pub use file_system::CopyOptions; pub use file_system::CreateDirectoryOptions; pub use file_system::ExecutorFileSystem; @@ -42,6 +41,7 @@ pub use file_system::FileMetadata; pub use file_system::FileSystemResult; pub use file_system::ReadDirectoryEntry; pub use file_system::RemoveOptions; +pub use local_file_system::LOCAL_FS; pub use process::ExecBackend; pub use process::ExecProcess; pub use process::StartedExecProcess; diff --git a/codex-rs/exec-server/src/local_file_system.rs b/codex-rs/exec-server/src/local_file_system.rs index fba7efa306..69ab9d64b0 100644 --- a/codex-rs/exec-server/src/local_file_system.rs +++ b/codex-rs/exec-server/src/local_file_system.rs @@ -3,6 +3,8 @@ use codex_utils_absolute_path::AbsolutePathBuf; use std::path::Component; use std::path::Path; use std::path::PathBuf; +use std::sync::Arc; +use std::sync::LazyLock; use std::time::SystemTime; use std::time::UNIX_EPOCH; use tokio::io; @@ -17,6 +19,9 @@ use crate::RemoveOptions; const MAX_READ_FILE_BYTES: u64 = 512 * 1024 * 1024; +pub static LOCAL_FS: LazyLock> = + LazyLock::new(|| -> Arc { Arc::new(LocalFileSystem) }); + #[derive(Clone, Default)] pub(crate) struct LocalFileSystem; diff --git 
a/codex-rs/exec-server/src/remote_file_system.rs b/codex-rs/exec-server/src/remote_file_system.rs index 27e4fee911..7da10b0e50 100644 --- a/codex-rs/exec-server/src/remote_file_system.rs +++ b/codex-rs/exec-server/src/remote_file_system.rs @@ -23,6 +23,7 @@ use crate::ReadDirectoryEntry; use crate::RemoveOptions; const INVALID_REQUEST_ERROR_CODE: i64 = -32600; +const NOT_FOUND_ERROR_CODE: i64 = -32004; #[derive(Clone)] pub(crate) struct RemoteFileSystem { @@ -151,6 +152,9 @@ impl ExecutorFileSystem for RemoteFileSystem { fn map_remote_error(error: ExecServerError) -> io::Error { match error { + ExecServerError::Server { code, message } if code == NOT_FOUND_ERROR_CODE => { + io::Error::new(io::ErrorKind::NotFound, message) + } ExecServerError::Server { code, message } if code == INVALID_REQUEST_ERROR_CODE => { io::Error::new(io::ErrorKind::InvalidInput, message) } diff --git a/codex-rs/exec-server/src/rpc.rs b/codex-rs/exec-server/src/rpc.rs index f2d725e69c..bf7cb27a5a 100644 --- a/codex-rs/exec-server/src/rpc.rs +++ b/codex-rs/exec-server/src/rpc.rs @@ -356,6 +356,14 @@ pub(crate) fn invalid_params(message: String) -> JSONRPCErrorError { } } +pub(crate) fn not_found(message: String) -> JSONRPCErrorError { + JSONRPCErrorError { + code: -32004, + data: None, + message, + } +} + pub(crate) fn internal_error(message: String) -> JSONRPCErrorError { JSONRPCErrorError { code: -32603, diff --git a/codex-rs/exec-server/src/server/file_system_handler.rs b/codex-rs/exec-server/src/server/file_system_handler.rs index 2e4e1592d1..2f6c679dcc 100644 --- a/codex-rs/exec-server/src/server/file_system_handler.rs +++ b/codex-rs/exec-server/src/server/file_system_handler.rs @@ -26,6 +26,7 @@ use crate::RemoveOptions; use crate::local_file_system::LocalFileSystem; use crate::rpc::internal_error; use crate::rpc::invalid_request; +use crate::rpc::not_found; #[derive(Clone, Default)] pub(crate) struct FileSystemHandler { @@ -153,7 +154,9 @@ impl FileSystemHandler { } fn map_fs_error(err: 
io::Error) -> JSONRPCErrorError { - if err.kind() == io::ErrorKind::InvalidInput { + if err.kind() == io::ErrorKind::NotFound { + not_found(err.to_string()) + } else if err.kind() == io::ErrorKind::InvalidInput { invalid_request(err.to_string()) } else { internal_error(err.to_string()) diff --git a/codex-rs/exec-server/tests/file_system.rs b/codex-rs/exec-server/tests/file_system.rs index dea47e8fcc..ca55587f5a 100644 --- a/codex-rs/exec-server/tests/file_system.rs +++ b/codex-rs/exec-server/tests/file_system.rs @@ -122,6 +122,12 @@ async fn file_system_methods_cover_surface_area(use_remote: bool) -> Result<()> .with_context(|| format!("mode={use_remote}"))?; assert_eq!(nested_file_contents, b"hello from trait"); + let nested_file_text = file_system + .read_file_text(&absolute_path(nested_file.clone())) + .await + .with_context(|| format!("mode={use_remote}"))?; + assert_eq!(nested_file_text, "hello from trait"); + file_system .copy( &absolute_path(nested_file), diff --git a/codex-rs/exec/src/event_processor_with_human_output_tests.rs b/codex-rs/exec/src/event_processor_with_human_output_tests.rs index 2b625dd564..232be7f02c 100644 --- a/codex-rs/exec/src/event_processor_with_human_output_tests.rs +++ b/codex-rs/exec/src/event_processor_with_human_output_tests.rs @@ -167,6 +167,9 @@ fn turn_completed_recovers_final_message_from_turn_items() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); @@ -211,6 +214,9 @@ fn turn_completed_overwrites_stale_final_message_from_turn_items() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); @@ -251,6 +257,9 @@ fn turn_completed_preserves_streamed_final_message_when_turn_items_are_empty() { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); @@ -291,6 +300,9 @@ fn 
turn_failed_clears_stale_final_message() { items: Vec::new(), status: TurnStatus::Failed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); @@ -332,6 +344,9 @@ fn turn_interrupted_clears_stale_final_message() { items: Vec::new(), status: TurnStatus::Interrupted, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); diff --git a/codex-rs/exec/src/event_processor_with_jsonl_output_tests.rs b/codex-rs/exec/src/event_processor_with_jsonl_output_tests.rs index ffb4d1ed01..2a26ec3c7e 100644 --- a/codex-rs/exec/src/event_processor_with_jsonl_output_tests.rs +++ b/codex-rs/exec/src/event_processor_with_jsonl_output_tests.rs @@ -38,6 +38,9 @@ fn failed_turn_does_not_overwrite_output_last_message_file() { additional_details: None, codex_error_info: None, }), + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }, )); diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs index 5b77582db5..8c849ca806 100644 --- a/codex-rs/exec/src/lib.rs +++ b/codex-rs/exec/src/lib.rs @@ -7,8 +7,8 @@ mod cli; mod event_processor; mod event_processor_with_human_output; -pub mod event_processor_with_jsonl_output; -pub mod exec_events; +pub(crate) mod event_processor_with_jsonl_output; +pub(crate) mod exec_events; pub use cli::Cli; pub use cli::Command; @@ -85,7 +85,42 @@ use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_oss::ensure_oss_provider_ready; use codex_utils_oss::get_default_model_for_oss_provider; use event_processor_with_human_output::EventProcessorWithHumanOutput; -use event_processor_with_jsonl_output::EventProcessorWithJsonOutput; +pub use event_processor_with_jsonl_output::CodexStatus; +pub use event_processor_with_jsonl_output::CollectedThreadEvents; +pub use event_processor_with_jsonl_output::EventProcessorWithJsonOutput; +pub use exec_events::AgentMessageItem; +pub use exec_events::CollabAgentState; +pub use exec_events::CollabAgentStatus; +pub use 
exec_events::CollabTool; +pub use exec_events::CollabToolCallItem; +pub use exec_events::CollabToolCallStatus; +pub use exec_events::CommandExecutionItem; +pub use exec_events::CommandExecutionStatus; +pub use exec_events::ErrorItem; +pub use exec_events::FileChangeItem; +pub use exec_events::FileUpdateChange; +pub use exec_events::ItemCompletedEvent; +pub use exec_events::ItemStartedEvent; +pub use exec_events::ItemUpdatedEvent; +pub use exec_events::McpToolCallItem; +pub use exec_events::McpToolCallItemError; +pub use exec_events::McpToolCallItemResult; +pub use exec_events::McpToolCallStatus; +pub use exec_events::PatchApplyStatus; +pub use exec_events::PatchChangeKind; +pub use exec_events::ReasoningItem; +pub use exec_events::ThreadErrorEvent; +pub use exec_events::ThreadEvent; +pub use exec_events::ThreadItem as ExecThreadItem; +pub use exec_events::ThreadItemDetails; +pub use exec_events::ThreadStartedEvent; +pub use exec_events::TodoItem; +pub use exec_events::TodoListItem; +pub use exec_events::TurnCompletedEvent; +pub use exec_events::TurnFailedEvent; +pub use exec_events::TurnStartedEvent; +pub use exec_events::Usage; +pub use exec_events::WebSearchItem; use serde_json::Value; use std::collections::HashMap; use std::io::IsTerminal; @@ -105,7 +140,6 @@ use tracing_subscriber::prelude::*; use uuid::Uuid; use crate::cli::Command as ExecCommand; -use crate::event_processor::CodexStatus; use crate::event_processor::EventProcessor; const DEFAULT_ANALYTICS_ENABLED: bool = true; @@ -514,6 +548,66 @@ async fn run_exec_session(args: ExecRunArgs) -> anyhow::Result<()> { let default_sandbox_policy = config.permissions.sandbox_policy.get(); let default_effort = config.model_reasoning_effort; + let (initial_operation, prompt_summary) = match (command.as_ref(), prompt, images) { + (Some(ExecCommand::Review(review_cli)), _, _) => { + let review_request = build_review_request(review_cli)?; + let summary = 
codex_core::review_prompts::user_facing_hint(&review_request.target); + (InitialOperation::Review { review_request }, summary) + } + (Some(ExecCommand::Resume(args)), root_prompt, imgs) => { + let prompt_arg = args + .prompt + .clone() + .or_else(|| { + if args.last { + args.session_id.clone() + } else { + None + } + }) + .or(root_prompt); + let prompt_text = resolve_prompt(prompt_arg); + let mut items: Vec = imgs + .into_iter() + .chain(args.images.iter().cloned()) + .map(|path| UserInput::LocalImage { path }) + .collect(); + items.push(UserInput::Text { + text: prompt_text.clone(), + // CLI input doesn't track UI element ranges, so none are available here. + text_elements: Vec::new(), + }); + let output_schema = load_output_schema(output_schema_path.clone()); + ( + InitialOperation::UserTurn { + items, + output_schema, + }, + prompt_text, + ) + } + (None, root_prompt, imgs) => { + let prompt_text = resolve_root_prompt(root_prompt); + let mut items: Vec = imgs + .into_iter() + .map(|path| UserInput::LocalImage { path }) + .collect(); + items.push(UserInput::Text { + text: prompt_text.clone(), + // CLI input doesn't track UI element ranges, so none are available here. + text_elements: Vec::new(), + }); + let output_schema = load_output_schema(output_schema_path); + ( + InitialOperation::UserTurn { + items, + output_schema, + }, + prompt_text, + ) + } + }; + // When --yolo (dangerously_bypass_approvals_and_sandbox) is set, also skip the git repo check // since the user is explicitly running in an externally sandboxed environment. 
if !skip_git_repo_check @@ -588,66 +682,6 @@ async fn run_exec_session(args: ExecRunArgs) -> anyhow::Result<()> { exec_span.record("thread.id", primary_thread_id_for_span.as_str()); - let (initial_operation, prompt_summary) = match (command.as_ref(), prompt, images) { - (Some(ExecCommand::Review(review_cli)), _, _) => { - let review_request = build_review_request(review_cli)?; - let summary = codex_core::review_prompts::user_facing_hint(&review_request.target); - (InitialOperation::Review { review_request }, summary) - } - (Some(ExecCommand::Resume(args)), root_prompt, imgs) => { - let prompt_arg = args - .prompt - .clone() - .or_else(|| { - if args.last { - args.session_id.clone() - } else { - None - } - }) - .or(root_prompt); - let prompt_text = resolve_prompt(prompt_arg); - let mut items: Vec = imgs - .into_iter() - .chain(args.images.iter().cloned()) - .map(|path| UserInput::LocalImage { path }) - .collect(); - items.push(UserInput::Text { - text: prompt_text.clone(), - // CLI input doesn't track UI element ranges, so none are available here. - text_elements: Vec::new(), - }); - let output_schema = load_output_schema(output_schema_path.clone()); - ( - InitialOperation::UserTurn { - items, - output_schema, - }, - prompt_text, - ) - } - (None, root_prompt, imgs) => { - let prompt_text = resolve_root_prompt(root_prompt); - let mut items: Vec = imgs - .into_iter() - .map(|path| UserInput::LocalImage { path }) - .collect(); - items.push(UserInput::Text { - text: prompt_text.clone(), - // CLI input doesn't track UI element ranges, so none are available here. - text_elements: Vec::new(), - }); - let output_schema = load_output_schema(output_schema_path); - ( - InitialOperation::UserTurn { - items, - output_schema, - }, - prompt_text, - ) - } - }; - // Print the effective configuration and initial request so users can see what Codex // is using. 
event_processor.print_config_summary(&config, &prompt_summary, &session_configured); diff --git a/codex-rs/exec/src/lib_tests.rs b/codex-rs/exec/src/lib_tests.rs index 4746ed20d0..0af5cc5250 100644 --- a/codex-rs/exec/src/lib_tests.rs +++ b/codex-rs/exec/src/lib_tests.rs @@ -268,6 +268,9 @@ fn turn_items_for_thread_returns_matching_turn_items() { }], status: codex_app_server_protocol::TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, codex_app_server_protocol::Turn { id: "turn-2".to_string(), @@ -277,6 +280,9 @@ fn turn_items_for_thread_returns_matching_turn_items() { }], status: codex_app_server_protocol::TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, ], }; @@ -303,6 +309,9 @@ fn should_backfill_turn_completed_items_skips_ephemeral_threads() { items: Vec::new(), status: codex_app_server_protocol::TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }); diff --git a/codex-rs/exec/tests/event_processor_with_json_output.rs b/codex-rs/exec/tests/event_processor_with_json_output.rs index 5491e895e2..bb29366561 100644 --- a/codex-rs/exec/tests/event_processor_with_json_output.rs +++ b/codex-rs/exec/tests/event_processor_with_json_output.rs @@ -37,42 +37,42 @@ use codex_protocol::protocol::SessionConfiguredEvent; use pretty_assertions::assert_eq; use serde_json::json; -use codex_exec::event_processor_with_jsonl_output::CodexStatus; -use codex_exec::event_processor_with_jsonl_output::CollectedThreadEvents; -use codex_exec::event_processor_with_jsonl_output::EventProcessorWithJsonOutput; -use codex_exec::exec_events::AgentMessageItem; -use codex_exec::exec_events::CollabAgentState; -use codex_exec::exec_events::CollabAgentStatus; -use codex_exec::exec_events::CollabTool; -use codex_exec::exec_events::CollabToolCallItem; -use codex_exec::exec_events::CollabToolCallStatus; -use 
codex_exec::exec_events::CommandExecutionItem; -use codex_exec::exec_events::CommandExecutionStatus; -use codex_exec::exec_events::ErrorItem; -use codex_exec::exec_events::FileChangeItem; -use codex_exec::exec_events::FileUpdateChange as ExecFileUpdateChange; -use codex_exec::exec_events::ItemCompletedEvent; -use codex_exec::exec_events::ItemStartedEvent; -use codex_exec::exec_events::ItemUpdatedEvent; -use codex_exec::exec_events::McpToolCallItem; -use codex_exec::exec_events::McpToolCallItemError; -use codex_exec::exec_events::McpToolCallItemResult; -use codex_exec::exec_events::McpToolCallStatus; -use codex_exec::exec_events::PatchApplyStatus; -use codex_exec::exec_events::PatchChangeKind; -use codex_exec::exec_events::ReasoningItem; -use codex_exec::exec_events::ThreadErrorEvent; -use codex_exec::exec_events::ThreadEvent; -use codex_exec::exec_events::ThreadItem as ExecThreadItem; -use codex_exec::exec_events::ThreadItemDetails; -use codex_exec::exec_events::ThreadStartedEvent; -use codex_exec::exec_events::TodoItem; -use codex_exec::exec_events::TodoListItem; -use codex_exec::exec_events::TurnCompletedEvent; -use codex_exec::exec_events::TurnFailedEvent; -use codex_exec::exec_events::TurnStartedEvent; -use codex_exec::exec_events::Usage; -use codex_exec::exec_events::WebSearchItem; +use codex_exec::AgentMessageItem; +use codex_exec::CodexStatus; +use codex_exec::CollabAgentState; +use codex_exec::CollabAgentStatus; +use codex_exec::CollabTool; +use codex_exec::CollabToolCallItem; +use codex_exec::CollabToolCallStatus; +use codex_exec::CollectedThreadEvents; +use codex_exec::CommandExecutionItem; +use codex_exec::CommandExecutionStatus; +use codex_exec::ErrorItem; +use codex_exec::EventProcessorWithJsonOutput; +use codex_exec::ExecThreadItem; +use codex_exec::FileChangeItem; +use codex_exec::FileUpdateChange as ExecFileUpdateChange; +use codex_exec::ItemCompletedEvent; +use codex_exec::ItemStartedEvent; +use codex_exec::ItemUpdatedEvent; +use 
codex_exec::McpToolCallItem; +use codex_exec::McpToolCallItemError; +use codex_exec::McpToolCallItemResult; +use codex_exec::McpToolCallStatus; +use codex_exec::PatchApplyStatus; +use codex_exec::PatchChangeKind; +use codex_exec::ReasoningItem; +use codex_exec::ThreadErrorEvent; +use codex_exec::ThreadEvent; +use codex_exec::ThreadItemDetails; +use codex_exec::ThreadStartedEvent; +use codex_exec::TodoItem; +use codex_exec::TodoListItem; +use codex_exec::TurnCompletedEvent; +use codex_exec::TurnFailedEvent; +use codex_exec::TurnStartedEvent; +use codex_exec::Usage; +use codex_exec::WebSearchItem; #[test] fn map_todo_items_preserves_text_and_completion_state() { @@ -144,6 +144,9 @@ fn turn_started_emits_turn_started_event() { items: Vec::new(), status: TurnStatus::InProgress, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, })); @@ -479,6 +482,7 @@ fn mcp_tool_call_begin_and_end_emit_item_events() { result: Some(McpToolCallResult { content: Vec::new(), structured_content: None, + meta: None, }), error: None, duration_ms: Some(1_000), @@ -610,6 +614,7 @@ fn mcp_tool_call_defaults_arguments_and_preserves_structured_content() { "text": "done", })], structured_content: Some(json!({ "status": "ok" })), + meta: None, }), error: None, duration_ms: Some(10), @@ -1066,6 +1071,9 @@ fn plan_update_emits_started_then_updated_then_completed() { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1122,6 +1130,9 @@ fn plan_update_after_completion_starts_new_todo_list_with_new_id() { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1201,6 +1212,9 @@ fn token_usage_update_is_emitted_on_turn_completion() { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1236,6 +1250,9 @@ fn 
turn_completion_recovers_final_message_from_turn_items() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1310,6 +1327,9 @@ fn turn_completion_reconciles_started_items_from_turn_items() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1367,6 +1387,9 @@ fn turn_completion_overwrites_stale_final_message_from_turn_items() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1407,6 +1430,9 @@ fn turn_completion_preserves_streamed_final_message_when_turn_items_are_empty() items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1455,6 +1481,9 @@ fn failed_turn_clears_stale_final_message() { additional_details: None, codex_error_info: None, }), + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1478,6 +1507,9 @@ fn turn_completion_falls_back_to_final_plan_text() { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); @@ -1526,6 +1558,9 @@ fn turn_failure_prefers_structured_error_message() { items: Vec::new(), status: TurnStatus::Failed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, }, )); diff --git a/codex-rs/exec/tests/suite/sandbox.rs b/codex-rs/exec/tests/suite/sandbox.rs index 70f5197e82..ee53e7d437 100644 --- a/codex-rs/exec/tests/suite/sandbox.rs +++ b/codex-rs/exec/tests/suite/sandbox.rs @@ -42,7 +42,7 @@ async fn spawn_command_under_sandbox( stdio_policy: StdioPolicy, env: HashMap, ) -> std::io::Result { - use codex_core::landlock::spawn_command_under_linux_sandbox; + use codex_core::spawn_command_under_linux_sandbox; let codex_linux_sandbox_exe = core_test_support::find_codex_linux_sandbox_exe() .map_err(|err| 
io::Error::new(io::ErrorKind::NotFound, err))?; spawn_command_under_linux_sandbox( diff --git a/codex-rs/execpolicy/src/amend.rs b/codex-rs/execpolicy/src/amend.rs index 7909fc14a4..e25fd1bd91 100644 --- a/codex-rs/execpolicy/src/amend.rs +++ b/codex-rs/execpolicy/src/amend.rs @@ -9,7 +9,6 @@ use std::path::PathBuf; use crate::decision::Decision; use crate::rule::NetworkRuleProtocol; use crate::rule::normalize_network_rule_host; -use serde_json; use thiserror::Error; #[derive(Debug, Error)] diff --git a/codex-rs/execpolicy/src/lib.rs b/codex-rs/execpolicy/src/lib.rs index 8cb89c36e4..128c9dd41e 100644 --- a/codex-rs/execpolicy/src/lib.rs +++ b/codex-rs/execpolicy/src/lib.rs @@ -1,11 +1,11 @@ -pub mod amend; -pub mod decision; -pub mod error; -pub mod execpolicycheck; +pub(crate) mod amend; +pub(crate) mod decision; +pub(crate) mod error; +pub(crate) mod execpolicycheck; mod executable_name; -pub mod parser; -pub mod policy; -pub mod rule; +pub(crate) mod parser; +pub(crate) mod policy; +pub(crate) mod rule; pub use amend::AmendError; pub use amend::blocking_append_allow_prefix_rule; @@ -22,6 +22,9 @@ pub use policy::Evaluation; pub use policy::MatchOptions; pub use policy::Policy; pub use rule::NetworkRuleProtocol; +pub use rule::PatternToken; +pub use rule::PrefixPattern; +pub use rule::PrefixRule; pub use rule::Rule; pub use rule::RuleMatch; pub use rule::RuleRef; diff --git a/codex-rs/execpolicy/src/main.rs b/codex-rs/execpolicy/src/main.rs index d3b34a3307..82e1bc12c4 100644 --- a/codex-rs/execpolicy/src/main.rs +++ b/codex-rs/execpolicy/src/main.rs @@ -1,6 +1,6 @@ use anyhow::Result; use clap::Parser; -use codex_execpolicy::execpolicycheck::ExecPolicyCheckCommand; +use codex_execpolicy::ExecPolicyCheckCommand; /// CLI for evaluating exec policies #[derive(Parser)] diff --git a/codex-rs/execpolicy/src/parser.rs b/codex-rs/execpolicy/src/parser.rs index 46764d0e07..5d01df18bb 100644 --- a/codex-rs/execpolicy/src/parser.rs +++ b/codex-rs/execpolicy/src/parser.rs 
@@ -1,6 +1,5 @@ use codex_utils_absolute_path::AbsolutePathBuf; use multimap::MultiMap; -use shlex; use starlark::any::ProvidesStaticType; use starlark::codemap::FileSpan; use starlark::environment::GlobalsBuilder; diff --git a/codex-rs/execpolicy/tests/basic.rs b/codex-rs/execpolicy/tests/basic.rs index bef845da67..50c3f5361f 100644 --- a/codex-rs/execpolicy/tests/basic.rs +++ b/codex-rs/execpolicy/tests/basic.rs @@ -10,14 +10,14 @@ use codex_execpolicy::Error; use codex_execpolicy::Evaluation; use codex_execpolicy::MatchOptions; use codex_execpolicy::NetworkRuleProtocol; +use codex_execpolicy::PatternToken; use codex_execpolicy::Policy; use codex_execpolicy::PolicyParser; +use codex_execpolicy::PrefixPattern; +use codex_execpolicy::PrefixRule; use codex_execpolicy::RuleMatch; use codex_execpolicy::RuleRef; use codex_execpolicy::blocking_append_allow_prefix_rule; -use codex_execpolicy::rule::PatternToken; -use codex_execpolicy::rule::PrefixPattern; -use codex_execpolicy::rule::PrefixRule; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use tempfile::tempdir; diff --git a/codex-rs/features/Cargo.toml b/codex-rs/features/Cargo.toml index add5296d8c..95be6c400d 100644 --- a/codex-rs/features/Cargo.toml +++ b/codex-rs/features/Cargo.toml @@ -13,7 +13,6 @@ path = "src/lib.rs" workspace = true [dependencies] -codex-login = { workspace = true } codex-otel = { workspace = true } codex-protocol = { workspace = true } schemars = { workspace = true } diff --git a/codex-rs/features/src/lib.rs b/codex-rs/features/src/lib.rs index c87b6dc9ff..23c23c9916 100644 --- a/codex-rs/features/src/lib.rs +++ b/codex-rs/features/src/lib.rs @@ -3,8 +3,6 @@ //! This crate defines the feature registry plus the logic used to resolve an //! effective feature set from config-like inputs. 
-use codex_login::AuthManager; -use codex_login::CodexAuth; use codex_otel::SessionTelemetry; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; @@ -140,6 +138,8 @@ pub enum Feature { Collab, /// Enable task-path-based multi-agent routing. MultiAgentV2, + /// Hide spawn_agent agent/model override fields from the model-visible tool schema. + DebugHideSpawnAgentMetadata, /// Enable CSV-backed agent job tools. SpawnCsv, /// Enable apps. @@ -176,6 +176,8 @@ pub enum Feature { FastMode, /// Enable experimental realtime voice conversation mode in the TUI. RealtimeConversation, + /// Connect app-server to the ChatGPT remote control service. + RemoteControl, /// Removed compatibility flag. The TUI now always uses the app-server implementation. TuiAppServer, /// Prevent idle system sleep while a turn is actively running. @@ -273,25 +275,8 @@ impl Features { self.enabled.contains(&f) } - pub async fn apps_enabled(&self, auth_manager: Option<&AuthManager>) -> bool { - if !self.enabled(Feature::Apps) { - return false; - } - - let auth = match auth_manager { - Some(auth_manager) => auth_manager.auth().await, - None => None, - }; - self.apps_enabled_for_auth(auth.as_ref()) - } - - pub fn apps_enabled_cached(&self, auth_manager: Option<&AuthManager>) -> bool { - let auth = auth_manager.and_then(AuthManager::auth_cached); - self.apps_enabled_for_auth(auth.as_ref()) - } - - pub fn apps_enabled_for_auth(&self, auth: Option<&CodexAuth>) -> bool { - self.enabled(Feature::Apps) && auth.is_some_and(CodexAuth::is_chatgpt_auth) + pub fn apps_enabled_for_auth(&self, has_chatgpt_auth: bool) -> bool { + self.enabled(Feature::Apps) && has_chatgpt_auth } pub fn use_legacy_landlock(&self) -> bool { @@ -638,7 +623,11 @@ pub const FEATURES: &[FeatureSpec] = &[ FeatureSpec { id: Feature::ImageDetailOriginal, key: "image_detail_original", - stage: Stage::UnderDevelopment, + stage: Stage::Experimental { + name: "Original image detail", + menu_description: "Let the model 
inspect tool-emitted images at full resolution on supported models instead of a resized approximation. This affects tool-emitted images such as those produced by `view_image`, not images attached directly in the UI. It is particularly important for localization and precise UI targeting, for reading small text, and for reasoning about precise layout.", + announcement: "NEW: Original image detail is now available in /experimental. Enable it to let tools request full-resolution image detail on supported models for CUA and localization tasks.", + }, default_enabled: false, }, FeatureSpec { @@ -719,6 +708,12 @@ pub const FEATURES: &[FeatureSpec] = &[ stage: Stage::UnderDevelopment, default_enabled: false, }, + FeatureSpec { + id: Feature::DebugHideSpawnAgentMetadata, + key: "debug_hide_spawn_agent_metadata", + stage: Stage::UnderDevelopment, + default_enabled: false, + }, FeatureSpec { id: Feature::SpawnCsv, key: "enable_fanout", @@ -825,6 +820,12 @@ pub const FEATURES: &[FeatureSpec] = &[ stage: Stage::UnderDevelopment, default_enabled: false, }, + FeatureSpec { + id: Feature::RemoteControl, + key: "remote_control", + stage: Stage::UnderDevelopment, + default_enabled: false, + }, FeatureSpec { id: Feature::TuiAppServer, key: "tui_app_server", diff --git a/codex-rs/features/src/tests.rs b/codex-rs/features/src/tests.rs index 0777262065..23653fadcd 100644 --- a/codex-rs/features/src/tests.rs +++ b/codex-rs/features/src/tests.rs @@ -157,12 +157,9 @@ fn tool_call_mcp_elicitation_is_stable_and_enabled_by_default() { } #[test] -fn image_detail_original_feature_is_under_development() { - assert_eq!( - Feature::ImageDetailOriginal.stage(), - Stage::UnderDevelopment - ); - assert_eq!(Feature::ImageDetailOriginal.default_enabled(), false); +fn remote_control_is_under_development() { + assert_eq!(Feature::RemoteControl.stage(), Stage::UnderDevelopment); + assert_eq!(Feature::RemoteControl.default_enabled(), false); } #[test] @@ -201,16 +198,11 @@ fn 
enable_fanout_normalization_enables_multi_agent_one_way() { #[test] fn apps_require_feature_flag_and_chatgpt_auth() { let mut features = Features::with_defaults(); - assert!(!features.apps_enabled_for_auth(/*auth*/ None)); + assert!(!features.apps_enabled_for_auth(/*has_chatgpt_auth*/ false)); features.enable(Feature::Apps); - assert!(!features.apps_enabled_for_auth(/*auth*/ None)); - - let api_key_auth = codex_login::CodexAuth::from_api_key("test-api-key"); - assert!(!features.apps_enabled_for_auth(Some(&api_key_auth))); - - let chatgpt_auth = codex_login::CodexAuth::create_dummy_chatgpt_auth_for_testing(); - assert!(features.apps_enabled_for_auth(Some(&chatgpt_auth))); + assert!(!features.apps_enabled_for_auth(/*has_chatgpt_auth*/ false)); + assert!(features.apps_enabled_for_auth(/*has_chatgpt_auth*/ true)); } #[test] diff --git a/codex-rs/feedback/src/lib.rs b/codex-rs/feedback/src/lib.rs index 8d9489dbe6..712aeee219 100644 --- a/codex-rs/feedback/src/lib.rs +++ b/codex-rs/feedback/src/lib.rs @@ -14,8 +14,6 @@ use anyhow::anyhow; use codex_login::AuthEnvTelemetry; use codex_protocol::ThreadId; use codex_protocol::protocol::SessionSource; -use feedback_diagnostics::FEEDBACK_DIAGNOSTICS_ATTACHMENT_FILENAME; -use feedback_diagnostics::FeedbackDiagnostics; use tracing::Event; use tracing::Level; use tracing::field::Visit; @@ -24,7 +22,10 @@ use tracing_subscriber::filter::Targets; use tracing_subscriber::fmt::writer::MakeWriter; use tracing_subscriber::registry::LookupSpan; -pub mod feedback_diagnostics; +pub(crate) mod feedback_diagnostics; +pub use feedback_diagnostics::FEEDBACK_DIAGNOSTICS_ATTACHMENT_FILENAME; +pub use feedback_diagnostics::FeedbackDiagnostic; +pub use feedback_diagnostics::FeedbackDiagnostics; const DEFAULT_MAX_BYTES: usize = 4 * 1024 * 1024; // 4 MiB const SENTRY_DSN: &str = @@ -609,7 +610,7 @@ mod tests { use std::fs; use super::*; - use feedback_diagnostics::FeedbackDiagnostic; + use crate::FeedbackDiagnostic; use 
pretty_assertions::assert_eq; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; diff --git a/codex-rs/git-utils/src/info.rs b/codex-rs/git-utils/src/info.rs index 06e39b5655..048a8cedb6 100644 --- a/codex-rs/git-utils/src/info.rs +++ b/codex-rs/git-utils/src/info.rs @@ -632,9 +632,7 @@ pub fn resolve_root_git_project_for_trust(cwd: &Path) -> Option { } let git_dir_path = canonicalize_or_raw( - AbsolutePathBuf::resolve_path_against_base(git_dir_rel, &repo_root) - .ok()? - .into_path_buf(), + AbsolutePathBuf::resolve_path_against_base(git_dir_rel, &repo_root).into_path_buf(), ); let worktrees_dir = git_dir_path.parent()?; if worktrees_dir.file_name() != Some(OsStr::new("worktrees")) { diff --git a/codex-rs/hooks/src/engine/discovery.rs b/codex-rs/hooks/src/engine/discovery.rs index f39eb77438..465caa376d 100644 --- a/codex-rs/hooks/src/engine/discovery.rs +++ b/codex-rs/hooks/src/engine/discovery.rs @@ -35,16 +35,7 @@ pub(crate) fn discover_handlers(config_layer_stack: Option<&ConfigLayerStack>) - let Some(folder) = layer.config_folder() else { continue; }; - let source_path = match folder.join("hooks.json") { - Ok(source_path) => source_path, - Err(err) => { - warnings.push(format!( - "failed to resolve hooks config path from {}: {err}", - folder.display() - )); - continue; - } - }; + let source_path = folder.join("hooks.json"); if !source_path.as_path().is_file() { continue; } diff --git a/codex-rs/hooks/src/lib.rs b/codex-rs/hooks/src/lib.rs index 16334a6476..784b414427 100644 --- a/codex-rs/hooks/src/lib.rs +++ b/codex-rs/hooks/src/lib.rs @@ -1,5 +1,5 @@ mod engine; -pub mod events; +pub(crate) mod events; mod legacy_notify; mod registry; mod schema; diff --git a/codex-rs/linux-sandbox/Cargo.toml b/codex-rs/linux-sandbox/Cargo.toml index 524c574fb0..751d6d5d17 100644 --- a/codex-rs/linux-sandbox/Cargo.toml +++ b/codex-rs/linux-sandbox/Cargo.toml @@ -17,8 +17,6 @@ workspace = true [target.'cfg(target_os = 
"linux")'.dependencies] clap = { workspace = true, features = ["derive"] } -codex-config = { workspace = true } -codex-core = { workspace = true } codex-protocol = { workspace = true } codex-sandboxing = { workspace = true } codex-utils-absolute-path = { workspace = true } @@ -30,6 +28,8 @@ serde_json = { workspace = true } url = { workspace = true } [target.'cfg(target_os = "linux")'.dev-dependencies] +codex-config = { workspace = true } +codex-core = { workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true, features = [ diff --git a/codex-rs/linux-sandbox/README.md b/codex-rs/linux-sandbox/README.md index d8af1959a4..340b109101 100644 --- a/codex-rs/linux-sandbox/README.md +++ b/codex-rs/linux-sandbox/README.md @@ -15,7 +15,8 @@ no-`--argv0` compatibility path for the inner re-exec. If `bwrap` is missing, the helper falls back to the vendored bubblewrap path compiled into this binary. Codex also surfaces a startup warning when `bwrap` is missing so users know it -is falling back to the vendored helper. +is falling back to the vendored helper. Codex surfaces the same startup warning +path when bubblewrap cannot create user namespaces. **Current Behavior** - Legacy `SandboxPolicy` / `sandbox_mode` configs remain supported. @@ -28,6 +29,8 @@ is falling back to the vendored helper. path. - If `bwrap` is missing, Codex also surfaces a startup warning instead of printing directly from the sandbox helper. +- If bubblewrap cannot create user namespaces, Codex surfaces a startup warning + instead of waiting for a runtime sandbox failure. - Legacy Landlock + mount protections remain available as an explicit legacy fallback path. 
- Set `features.use_legacy_landlock = true` (or CLI `-c use_legacy_landlock=true`) diff --git a/codex-rs/login/Cargo.toml b/codex-rs/login/Cargo.toml index 459d76c383..9c2d021c76 100644 --- a/codex-rs/login/Cargo.toml +++ b/codex-rs/login/Cargo.toml @@ -25,7 +25,6 @@ once_cell = { workspace = true } os_info = { workspace = true } rand = { workspace = true } reqwest = { workspace = true, features = ["json", "blocking"] } -schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha2 = { workspace = true } diff --git a/codex-rs/login/src/api_bridge.rs b/codex-rs/login/src/api_bridge.rs index 4c432ff18d..d8b9dbb77c 100644 --- a/codex-rs/login/src/api_bridge.rs +++ b/codex-rs/login/src/api_bridge.rs @@ -1,4 +1,4 @@ -use codex_api::api_bridge::CoreAuthProvider; +use codex_api::CoreAuthProvider; use codex_model_provider_info::ModelProviderInfo; use crate::CodexAuth; diff --git a/codex-rs/login/src/auth/manager.rs b/codex-rs/login/src/auth/manager.rs index 9f58f0f090..71857c9700 100644 --- a/codex-rs/login/src/auth/manager.rs +++ b/codex-rs/login/src/auth/manager.rs @@ -20,7 +20,6 @@ use codex_protocol::config_types::ForcedLoginMethod; use codex_protocol::config_types::ModelProviderAuthInfo; use super::external_bearer::BearerTokenRefresher; -pub use crate::auth::storage::AuthCredentialsStoreMode; pub use crate::auth::storage::AuthDotJson; use crate::auth::storage::AuthStorageBackend; use crate::auth::storage::create_auth_storage; @@ -30,6 +29,7 @@ use crate::token_data::TokenData; use crate::token_data::parse_chatgpt_jwt_claims; use crate::token_data::parse_jwt_expiration; use codex_client::CodexHttpClient; +use codex_config::types::AuthCredentialsStoreMode; use codex_protocol::account::PlanType as AccountPlanType; use codex_protocol::auth::KnownPlan as InternalKnownPlan; use codex_protocol::auth::PlanType as InternalPlanType; @@ -1108,6 +1108,23 @@ pub struct AuthManager { external_auth: RwLock>>, } +/// 
Configuration view required to construct a shared [`AuthManager`]. +/// +/// Implementations should return the auth-related config values for the +/// already-resolved runtime configuration. The primary implementation is +/// `codex_core::config::Config`, but this trait keeps `codex-login` independent +/// from `codex-core`. +pub trait AuthManagerConfig { + /// Returns the Codex home directory used for auth storage. + fn codex_home(&self) -> PathBuf; + + /// Returns the CLI auth credential storage mode for auth loading. + fn cli_auth_credentials_store_mode(&self) -> AuthCredentialsStoreMode; + + /// Returns the workspace ID that ChatGPT auth should be restricted to, if any. + fn forced_chatgpt_workspace_id(&self) -> Option; +} + impl Debug for AuthManager { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("AuthManager") @@ -1404,19 +1421,18 @@ impl AuthManager { )) } - pub fn shared_with_external_auth( - codex_home: PathBuf, + /// Convenience constructor returning an `Arc` wrapper from resolved config. 
+ pub fn shared_from_config( + config: &impl AuthManagerConfig, enable_codex_api_key_env: bool, - auth_credentials_store_mode: AuthCredentialsStoreMode, - external_auth: Arc, ) -> Arc { - let manager = Self::shared( - codex_home, + let auth_manager = Self::shared( + config.codex_home(), enable_codex_api_key_env, - auth_credentials_store_mode, + config.cli_auth_credentials_store_mode(), ); - manager.set_external_auth(external_auth); - manager + auth_manager.set_forced_chatgpt_workspace_id(config.forced_chatgpt_workspace_id()); + auth_manager } pub fn unauthorized_recovery(self: &Arc) -> UnauthorizedRecovery { diff --git a/codex-rs/login/src/auth/storage.rs b/codex-rs/login/src/auth/storage.rs index b1e04b8685..97e801415c 100644 --- a/codex-rs/login/src/auth/storage.rs +++ b/codex-rs/login/src/auth/storage.rs @@ -1,6 +1,5 @@ use chrono::DateTime; use chrono::Utc; -use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use sha2::Digest; @@ -21,25 +20,11 @@ use tracing::warn; use crate::token_data::TokenData; use codex_app_server_protocol::AuthMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_keyring_store::DefaultKeyringStore; use codex_keyring_store::KeyringStore; use once_cell::sync::Lazy; -/// Determine where Codex should store CLI auth credentials. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[serde(rename_all = "lowercase")] -pub enum AuthCredentialsStoreMode { - #[default] - /// Persist credentials in CODEX_HOME/auth.json. - File, - /// Persist credentials in the keyring. Fail if unavailable. - Keyring, - /// Use keyring when available; otherwise, fall back to a file in CODEX_HOME. - Auto, - /// Store credentials in memory only for the current process. - Ephemeral, -} - /// Expected structure for $CODEX_HOME/auth.json. 
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] pub struct AuthDotJson { diff --git a/codex-rs/login/src/lib.rs b/codex-rs/login/src/lib.rs index ad77d82389..1b52223bc3 100644 --- a/codex-rs/login/src/lib.rs +++ b/codex-rs/login/src/lib.rs @@ -9,6 +9,7 @@ mod pkce; mod server; pub use codex_client::BuildCustomCaTransportError as BuildLoginHttpClientError; +pub use codex_config::types::AuthCredentialsStoreMode; pub use device_code_auth::DeviceCode; pub use device_code_auth::complete_device_code_login; pub use device_code_auth::request_device_code; @@ -20,9 +21,9 @@ pub use server::run_login_server; pub use api_bridge::auth_provider_from_auth; pub use auth::AuthConfig; -pub use auth::AuthCredentialsStoreMode; pub use auth::AuthDotJson; pub use auth::AuthManager; +pub use auth::AuthManagerConfig; pub use auth::CLIENT_ID; pub use auth::CODEX_API_KEY_ENV_VAR; pub use auth::CodexAuth; diff --git a/codex-rs/login/src/server.rs b/codex-rs/login/src/server.rs index c811fa36df..169a8a3091 100644 --- a/codex-rs/login/src/server.rs +++ b/codex-rs/login/src/server.rs @@ -24,7 +24,6 @@ use std::sync::LazyLock; use std::thread; use std::time::Duration; -use crate::auth::AuthCredentialsStoreMode; use crate::auth::AuthDotJson; use crate::auth::save_auth; use crate::default_client::originator; @@ -36,6 +35,7 @@ use base64::Engine; use chrono::Utc; use codex_app_server_protocol::AuthMode; use codex_client::build_reqwest_client_with_custom_ca; +use codex_config::types::AuthCredentialsStoreMode; use codex_utils_template::Template; use rand::RngCore; use serde_json::Value as JsonValue; diff --git a/codex-rs/login/tests/suite/auth_refresh.rs b/codex-rs/login/tests/suite/auth_refresh.rs index 94ba8220e3..bf9e03bc26 100644 --- a/codex-rs/login/tests/suite/auth_refresh.rs +++ b/codex-rs/login/tests/suite/auth_refresh.rs @@ -4,7 +4,7 @@ use base64::Engine; use chrono::Duration; use chrono::Utc; use codex_app_server_protocol::AuthMode; -use codex_login::AuthCredentialsStoreMode; 
+use codex_config::types::AuthCredentialsStoreMode; use codex_login::AuthDotJson; use codex_login::AuthManager; use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR; diff --git a/codex-rs/login/tests/suite/device_code_login.rs b/codex-rs/login/tests/suite/device_code_login.rs index 80c4fc0e56..bed94c7005 100644 --- a/codex-rs/login/tests/suite/device_code_login.rs +++ b/codex-rs/login/tests/suite/device_code_login.rs @@ -3,8 +3,8 @@ use anyhow::Context; use base64::Engine; use base64::engine::general_purpose::URL_SAFE_NO_PAD; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::ServerOptions; -use codex_login::auth::AuthCredentialsStoreMode; use codex_login::auth::load_auth_dot_json; use codex_login::run_device_code_login; use serde_json::json; diff --git a/codex-rs/login/tests/suite/login_server_e2e.rs b/codex-rs/login/tests/suite/login_server_e2e.rs index 5b0ddd9b72..9522f5b0b0 100644 --- a/codex-rs/login/tests/suite/login_server_e2e.rs +++ b/codex-rs/login/tests/suite/login_server_e2e.rs @@ -7,8 +7,8 @@ use std::time::Duration; use anyhow::Result; use base64::Engine; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::ServerOptions; -use codex_login::auth::AuthCredentialsStoreMode; use codex_login::run_login_server; use core_test_support::skip_if_no_network; use tempfile::tempdir; diff --git a/codex-rs/mcp-server/Cargo.toml b/codex-rs/mcp-server/Cargo.toml index 29de014d4a..9deadea81b 100644 --- a/codex-rs/mcp-server/Cargo.toml +++ b/codex-rs/mcp-server/Cargo.toml @@ -25,7 +25,6 @@ codex-features = { workspace = true } codex-login = { workspace = true } codex-models-manager = { workspace = true } codex-protocol = { workspace = true } -codex-shell-command = { workspace = true } codex-utils-cli = { workspace = true } codex-utils-json-to-toml = { workspace = true } rmcp = { workspace = true } @@ -44,6 +43,7 @@ tracing = { workspace = true, features = ["log"] } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] 
} [dev-dependencies] +codex-shell-command = { workspace = true } core_test_support = { workspace = true } mcp_test_support = { workspace = true } os_info = { workspace = true } diff --git a/codex-rs/mcp-server/src/codex_tool_config.rs b/codex-rs/mcp-server/src/codex_tool_config.rs index f83fd4fd5a..341f0ac4b2 100644 --- a/codex-rs/mcp-server/src/codex_tool_config.rs +++ b/codex-rs/mcp-server/src/codex_tool_config.rs @@ -179,8 +179,8 @@ impl CodexToolCallParam { codex_self_exe: arg0_paths.codex_self_exe.clone(), codex_linux_sandbox_exe: arg0_paths.codex_linux_sandbox_exe.clone(), main_execve_wrapper_exe: arg0_paths.main_execve_wrapper_exe.clone(), - base_instructions, - developer_instructions, + base_instructions: base_instructions.map(Some), + developer_instructions: developer_instructions.map(Some), compact_prompt, ..Default::default() }; diff --git a/codex-rs/mcp-server/src/lib.rs b/codex-rs/mcp-server/src/lib.rs index 2596339acd..ccaaba3cd7 100644 --- a/codex-rs/mcp-server/src/lib.rs +++ b/codex-rs/mcp-server/src/lib.rs @@ -8,6 +8,7 @@ use std::sync::Arc; use codex_arg0::Arg0DispatchPaths; use codex_core::config::Config; use codex_exec_server::EnvironmentManager; +use codex_login::default_client::set_default_client_residency_requirement; use codex_utils_cli::CliConfigOverrides; use rmcp::model::ClientNotification; @@ -71,6 +72,7 @@ pub async fn run_main( .map_err(|e| { std::io::Error::new(ErrorKind::InvalidData, format!("error loading config: {e}")) })?; + set_default_client_residency_requirement(config.enforce_residency.value()); let otel = codex_core::otel_init::build_provider( &config, diff --git a/codex-rs/mcp-server/src/message_processor.rs b/codex-rs/mcp-server/src/message_processor.rs index a0d4321173..46470f994a 100644 --- a/codex-rs/mcp-server/src/message_processor.rs +++ b/codex-rs/mcp-server/src/message_processor.rs @@ -56,10 +56,9 @@ impl MessageProcessor { environment_manager: Arc, ) -> Self { let outgoing = Arc::new(outgoing); - let auth_manager = 
AuthManager::shared( - config.codex_home.clone(), + let auth_manager = AuthManager::shared_from_config( + config.as_ref(), /*enable_codex_api_key_env*/ false, - config.cli_auth_credentials_store_mode, ); let thread_manager = Arc::new(ThreadManager::new( config.as_ref(), diff --git a/codex-rs/model-provider-info/src/lib.rs b/codex-rs/model-provider-info/src/lib.rs index 968bcc8ed0..f39ff0c7fe 100644 --- a/codex-rs/model-provider-info/src/lib.rs +++ b/codex-rs/model-provider-info/src/lib.rs @@ -6,7 +6,7 @@ //! key. These override or extend the defaults at runtime. use codex_api::Provider as ApiProvider; -use codex_api::provider::RetryConfig as ApiRetryConfig; +use codex_api::RetryConfig as ApiRetryConfig; use codex_app_server_protocol::AuthMode; use codex_protocol::config_types::ModelProviderAuthInfo; use codex_protocol::error::CodexErr; diff --git a/codex-rs/model-provider-info/src/model_provider_info_tests.rs b/codex-rs/model-provider-info/src/model_provider_info_tests.rs index e456a6a5c2..bdc2e73b1a 100644 --- a/codex-rs/model-provider-info/src/model_provider_info_tests.rs +++ b/codex-rs/model-provider-info/src/model_provider_info_tests.rs @@ -152,7 +152,7 @@ args = ["--format=text"] args: vec!["--format=text".to_string()], timeout_ms: NonZeroU64::new(5_000).unwrap(), refresh_interval_ms: 300_000, - cwd: AbsolutePathBuf::resolve_path_against_base(".", base_dir.path()).unwrap(), + cwd: AbsolutePathBuf::resolve_path_against_base(".", base_dir.path()), }) ); } diff --git a/codex-rs/models-manager/Cargo.toml b/codex-rs/models-manager/Cargo.toml index fbd5d5f3e8..58eff2437a 100644 --- a/codex-rs/models-manager/Cargo.toml +++ b/codex-rs/models-manager/Cargo.toml @@ -17,6 +17,7 @@ chrono = { workspace = true, features = ["serde"] } codex-api = { workspace = true } codex-app-server-protocol = { workspace = true } codex-collaboration-mode-templates = { workspace = true } +codex-config = { workspace = true } codex-feedback = { workspace = true } codex-login = { workspace = 
true } codex-model-provider-info = { workspace = true } diff --git a/codex-rs/models-manager/src/lib.rs b/codex-rs/models-manager/src/lib.rs index 5f26b37f89..e99c33edb9 100644 --- a/codex-rs/models-manager/src/lib.rs +++ b/codex-rs/models-manager/src/lib.rs @@ -1,12 +1,11 @@ -pub mod cache; +pub(crate) mod cache; pub mod collaboration_mode_presets; -pub mod config; +pub(crate) mod config; pub mod manager; pub mod model_info; pub mod model_presets; pub use codex_app_server_protocol::AuthMode; -pub use codex_login::AuthCredentialsStoreMode; pub use codex_login::AuthManager; pub use codex_login::CodexAuth; pub use codex_model_provider_info::ModelProviderInfo; diff --git a/codex-rs/models-manager/src/manager.rs b/codex-rs/models-manager/src/manager.rs index 984c612296..a7d4aa12ab 100644 --- a/codex-rs/models-manager/src/manager.rs +++ b/codex-rs/models-manager/src/manager.rs @@ -7,7 +7,7 @@ use codex_api::ModelsClient; use codex_api::RequestTelemetry; use codex_api::ReqwestTransport; use codex_api::TransportError; -use codex_api::api_bridge::map_api_error; +use codex_api::map_api_error; use codex_app_server_protocol::AuthMode; use codex_feedback::FeedbackRequestTags; use codex_feedback::emit_feedback_request_tags_with_auth_env; diff --git a/codex-rs/models-manager/src/manager_tests.rs b/codex-rs/models-manager/src/manager_tests.rs index b955786cc5..23de81781a 100644 --- a/codex-rs/models-manager/src/manager_tests.rs +++ b/codex-rs/models-manager/src/manager_tests.rs @@ -3,7 +3,7 @@ use crate::ModelsManagerConfig; use base64::Engine as _; use chrono::Utc; use codex_api::TransportError; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::AuthManager; use codex_login::CodexAuth; use codex_model_provider_info::WireApi; diff --git a/codex-rs/otel/src/events/session_telemetry.rs b/codex-rs/otel/src/events/session_telemetry.rs index 42fd6e50c3..b1bf9b0947 100644 --- a/codex-rs/otel/src/events/session_telemetry.rs 
+++ b/codex-rs/otel/src/events/session_telemetry.rs @@ -3,29 +3,29 @@ use crate::ToolDecisionSource; use crate::events::shared::log_and_trace_event; use crate::events::shared::log_event; use crate::events::shared::trace_event; +use crate::metrics::API_CALL_COUNT_METRIC; +use crate::metrics::API_CALL_DURATION_METRIC; use crate::metrics::MetricsClient; use crate::metrics::MetricsConfig; use crate::metrics::MetricsError; +use crate::metrics::PROFILE_USAGE_METRIC; +use crate::metrics::RESPONSES_API_ENGINE_IAPI_TBT_DURATION_METRIC; +use crate::metrics::RESPONSES_API_ENGINE_IAPI_TTFT_DURATION_METRIC; +use crate::metrics::RESPONSES_API_ENGINE_SERVICE_TBT_DURATION_METRIC; +use crate::metrics::RESPONSES_API_ENGINE_SERVICE_TTFT_DURATION_METRIC; +use crate::metrics::RESPONSES_API_INFERENCE_TIME_DURATION_METRIC; +use crate::metrics::RESPONSES_API_OVERHEAD_DURATION_METRIC; use crate::metrics::Result as MetricsResult; -use crate::metrics::names::API_CALL_COUNT_METRIC; -use crate::metrics::names::API_CALL_DURATION_METRIC; -use crate::metrics::names::PROFILE_USAGE_METRIC; -use crate::metrics::names::RESPONSES_API_ENGINE_IAPI_TBT_DURATION_METRIC; -use crate::metrics::names::RESPONSES_API_ENGINE_IAPI_TTFT_DURATION_METRIC; -use crate::metrics::names::RESPONSES_API_ENGINE_SERVICE_TBT_DURATION_METRIC; -use crate::metrics::names::RESPONSES_API_ENGINE_SERVICE_TTFT_DURATION_METRIC; -use crate::metrics::names::RESPONSES_API_INFERENCE_TIME_DURATION_METRIC; -use crate::metrics::names::RESPONSES_API_OVERHEAD_DURATION_METRIC; -use crate::metrics::names::SSE_EVENT_COUNT_METRIC; -use crate::metrics::names::SSE_EVENT_DURATION_METRIC; -use crate::metrics::names::TOOL_CALL_COUNT_METRIC; -use crate::metrics::names::TOOL_CALL_DURATION_METRIC; -use crate::metrics::names::WEBSOCKET_EVENT_COUNT_METRIC; -use crate::metrics::names::WEBSOCKET_EVENT_DURATION_METRIC; -use crate::metrics::names::WEBSOCKET_REQUEST_COUNT_METRIC; -use crate::metrics::names::WEBSOCKET_REQUEST_DURATION_METRIC; +use 
crate::metrics::SSE_EVENT_COUNT_METRIC; +use crate::metrics::SSE_EVENT_DURATION_METRIC; +use crate::metrics::SessionMetricTagValues; +use crate::metrics::TOOL_CALL_COUNT_METRIC; +use crate::metrics::TOOL_CALL_DURATION_METRIC; +use crate::metrics::WEBSOCKET_EVENT_COUNT_METRIC; +use crate::metrics::WEBSOCKET_EVENT_DURATION_METRIC; +use crate::metrics::WEBSOCKET_REQUEST_COUNT_METRIC; +use crate::metrics::WEBSOCKET_REQUEST_DURATION_METRIC; use crate::metrics::runtime_metrics::RuntimeMetricsSummary; -use crate::metrics::tags::SessionMetricTagValues; use crate::metrics::timer::Timer; use crate::provider::OtelProvider; use crate::sanitize_metric_tag_value; diff --git a/codex-rs/otel/src/lib.rs b/codex-rs/otel/src/lib.rs index ea13ad9b96..c7d0b7c419 100644 --- a/codex-rs/otel/src/lib.rs +++ b/codex-rs/otel/src/lib.rs @@ -1,23 +1,27 @@ -pub mod config; +pub(crate) mod config; mod events; -pub mod metrics; -pub mod provider; -pub mod trace_context; +pub(crate) mod metrics; +pub(crate) mod provider; +pub(crate) mod trace_context; mod otlp; mod targets; -use crate::metrics::MetricsError; use crate::metrics::Result as MetricsResult; use serde::Serialize; use strum_macros::Display; +pub use crate::config::OtelExporter; +pub use crate::config::OtelHttpProtocol; +pub use crate::config::OtelSettings; +pub use crate::config::OtelTlsConfig; pub use crate::events::session_telemetry::AuthEnvTelemetryMetadata; pub use crate::events::session_telemetry::SessionTelemetry; pub use crate::events::session_telemetry::SessionTelemetryMetadata; pub use crate::metrics::runtime_metrics::RuntimeMetricTotals; pub use crate::metrics::runtime_metrics::RuntimeMetricsSummary; pub use crate::metrics::timer::Timer; +pub use crate::metrics::*; pub use crate::provider::OtelProvider; pub use crate::trace_context::context_from_w3c_trace_context; pub use crate::trace_context::current_span_trace_id; diff --git a/codex-rs/otel/src/metrics/mod.rs b/codex-rs/otel/src/metrics/mod.rs index 1d1195e5fe..bcbb85d35c 
100644 --- a/codex-rs/otel/src/metrics/mod.rs +++ b/codex-rs/otel/src/metrics/mod.rs @@ -1,9 +1,9 @@ mod client; mod config; mod error; -pub mod names; +pub(crate) mod names; pub(crate) mod runtime_metrics; -pub mod tags; +pub(crate) mod tags; pub(crate) mod timer; pub(crate) mod validation; @@ -12,7 +12,9 @@ pub use crate::metrics::config::MetricsConfig; pub use crate::metrics::config::MetricsExporter; pub use crate::metrics::error::MetricsError; pub use crate::metrics::error::Result; +pub use names::*; use std::sync::OnceLock; +pub use tags::SessionMetricTagValues; static GLOBAL_METRICS: OnceLock = OnceLock::new(); diff --git a/codex-rs/otel/tests/harness/mod.rs b/codex-rs/otel/tests/harness/mod.rs index acdba0b7e1..fbba56411c 100644 --- a/codex-rs/otel/tests/harness/mod.rs +++ b/codex-rs/otel/tests/harness/mod.rs @@ -1,6 +1,6 @@ -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::Result; +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; +use codex_otel::Result; use opentelemetry::KeyValue; use opentelemetry_sdk::metrics::InMemoryMetricExporter; use opentelemetry_sdk::metrics::data::AggregatedMetrics; diff --git a/codex-rs/otel/tests/suite/manager_metrics.rs b/codex-rs/otel/tests/suite/manager_metrics.rs index 8a61403c77..d95f42dfc5 100644 --- a/codex-rs/otel/tests/suite/manager_metrics.rs +++ b/codex-rs/otel/tests/suite/manager_metrics.rs @@ -2,9 +2,9 @@ use crate::harness::attributes_to_map; use crate::harness::build_metrics_with_defaults; use crate::harness::find_metric; use crate::harness::latest_metrics; +use codex_otel::Result; use codex_otel::SessionTelemetry; use codex_otel::TelemetryAuthMode; -use codex_otel::metrics::Result; use codex_protocol::ThreadId; use codex_protocol::protocol::SessionSource; use opentelemetry_sdk::metrics::data::AggregatedMetrics; diff --git a/codex-rs/otel/tests/suite/otlp_http_loopback.rs b/codex-rs/otel/tests/suite/otlp_http_loopback.rs index 
eb84dfdf5b..fd4e3531d8 100644 --- a/codex-rs/otel/tests/suite/otlp_http_loopback.rs +++ b/codex-rs/otel/tests/suite/otlp_http_loopback.rs @@ -1,10 +1,10 @@ +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; +use codex_otel::OtelExporter; +use codex_otel::OtelHttpProtocol; use codex_otel::OtelProvider; -use codex_otel::config::OtelExporter; -use codex_otel::config::OtelHttpProtocol; -use codex_otel::config::OtelSettings; -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::Result; +use codex_otel::OtelSettings; +use codex_otel::Result; use std::collections::HashMap; use std::io::Read as _; use std::io::Write as _; diff --git a/codex-rs/otel/tests/suite/runtime_summary.rs b/codex-rs/otel/tests/suite/runtime_summary.rs index 7ffa03e026..21fc7bf76c 100644 --- a/codex-rs/otel/tests/suite/runtime_summary.rs +++ b/codex-rs/otel/tests/suite/runtime_summary.rs @@ -1,10 +1,10 @@ +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; +use codex_otel::Result; use codex_otel::RuntimeMetricTotals; use codex_otel::RuntimeMetricsSummary; use codex_otel::SessionTelemetry; use codex_otel::TelemetryAuthMode; -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::Result; use codex_protocol::ThreadId; use codex_protocol::protocol::SessionSource; use eventsource_stream::Event as StreamEvent; diff --git a/codex-rs/otel/tests/suite/send.rs b/codex-rs/otel/tests/suite/send.rs index a50c579089..fc382bf88a 100644 --- a/codex-rs/otel/tests/suite/send.rs +++ b/codex-rs/otel/tests/suite/send.rs @@ -3,7 +3,7 @@ use crate::harness::build_metrics_with_defaults; use crate::harness::find_metric; use crate::harness::histogram_data; use crate::harness::latest_metrics; -use codex_otel::metrics::Result; +use codex_otel::Result; use pretty_assertions::assert_eq; use std::collections::BTreeMap; diff --git a/codex-rs/otel/tests/suite/snapshot.rs 
b/codex-rs/otel/tests/suite/snapshot.rs index 4686e79d3d..e631100004 100644 --- a/codex-rs/otel/tests/suite/snapshot.rs +++ b/codex-rs/otel/tests/suite/snapshot.rs @@ -1,10 +1,10 @@ use crate::harness::attributes_to_map; use crate::harness::find_metric; +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; +use codex_otel::Result; use codex_otel::SessionTelemetry; use codex_otel::TelemetryAuthMode; -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::Result; use codex_protocol::ThreadId; use codex_protocol::protocol::SessionSource; use opentelemetry_sdk::metrics::InMemoryMetricExporter; diff --git a/codex-rs/otel/tests/suite/timing.rs b/codex-rs/otel/tests/suite/timing.rs index 8398e1a0d7..0cf73b9a56 100644 --- a/codex-rs/otel/tests/suite/timing.rs +++ b/codex-rs/otel/tests/suite/timing.rs @@ -2,7 +2,7 @@ use crate::harness::attributes_to_map; use crate::harness::build_metrics_with_defaults; use crate::harness::histogram_data; use crate::harness::latest_metrics; -use codex_otel::metrics::Result; +use codex_otel::Result; use pretty_assertions::assert_eq; use std::time::Duration; diff --git a/codex-rs/otel/tests/suite/validation.rs b/codex-rs/otel/tests/suite/validation.rs index 81f07ed147..6308506112 100644 --- a/codex-rs/otel/tests/suite/validation.rs +++ b/codex-rs/otel/tests/suite/validation.rs @@ -1,7 +1,7 @@ -use codex_otel::metrics::MetricsClient; -use codex_otel::metrics::MetricsConfig; -use codex_otel::metrics::MetricsError; -use codex_otel::metrics::Result; +use codex_otel::MetricsClient; +use codex_otel::MetricsConfig; +use codex_otel::MetricsError; +use codex_otel::Result; use opentelemetry_sdk::metrics::InMemoryMetricExporter; fn build_in_memory_client() -> Result { diff --git a/codex-rs/protocol/src/lib.rs b/codex-rs/protocol/src/lib.rs index de580de6f3..e78bfe6e45 100644 --- a/codex-rs/protocol/src/lib.rs +++ b/codex-rs/protocol/src/lib.rs @@ -23,4 +23,5 @@ pub mod plan_tool; pub mod 
protocol; pub mod request_permissions; pub mod request_user_input; +mod serde_helpers; pub mod user_input; diff --git a/codex-rs/protocol/src/mcp.rs b/codex-rs/protocol/src/mcp.rs index d2a8b0ccd8..f6e69743b9 100644 --- a/codex-rs/protocol/src/mcp.rs +++ b/codex-rs/protocol/src/mcp.rs @@ -82,6 +82,38 @@ pub struct Resource { pub meta: Option, } +/// Contents returned when reading a resource from an MCP server. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema, TS)] +#[serde(untagged)] +pub enum ResourceContent { + #[serde(rename_all = "camelCase")] + #[ts(rename_all = "camelCase")] + Text { + /// The URI of this resource. + uri: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + mime_type: Option, + text: String, + #[serde(rename = "_meta", default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + meta: Option, + }, + #[serde(rename_all = "camelCase")] + #[ts(rename_all = "camelCase")] + Blob { + /// The URI of this resource. + uri: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + mime_type: Option, + blob: String, + #[serde(rename = "_meta", default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + meta: Option, + }, +} + /// A template description for resources available on the server. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema, TS)] #[serde(rename_all = "camelCase")] diff --git a/codex-rs/protocol/src/models.rs b/codex-rs/protocol/src/models.rs index d4e568fea9..0f18b68393 100644 --- a/codex-rs/protocol/src/models.rs +++ b/codex-rs/protocol/src/models.rs @@ -27,7 +27,7 @@ use crate::user_input::UserInput; use codex_execpolicy::Policy; use codex_git_utils::GhostCommit; use codex_utils_absolute_path::AbsolutePathBuf; -use codex_utils_image::error::ImageProcessingError; +use codex_utils_image::ImageProcessingError; use schemars::JsonSchema; use crate::mcp::CallToolResult; diff --git a/codex-rs/protocol/src/permissions.rs b/codex-rs/protocol/src/permissions.rs index 4aa1c7b36f..416d291fd1 100644 --- a/codex-rs/protocol/src/permissions.rs +++ b/codex-rs/protocol/src/permissions.rs @@ -522,7 +522,7 @@ impl FileSystemSandboxPolicy { if suffix.as_os_str().is_empty() { return None; } - root.join(suffix).ok() + Some(root.join(suffix)) }) } } else { @@ -921,7 +921,7 @@ fn resolve_candidate_path(path: &Path, cwd: &Path) -> Option { if path.is_absolute() { AbsolutePathBuf::from_absolute_path(path).ok() } else { - AbsolutePathBuf::resolve_path_against_base(path, cwd).ok() + Some(AbsolutePathBuf::resolve_path_against_base(path, cwd)) } } @@ -1032,9 +1032,10 @@ fn resolve_file_system_special_path( FileSystemSpecialPath::ProjectRoots { subpath } => { let cwd = cwd?; match subpath.as_ref() { - Some(subpath) => { - AbsolutePathBuf::resolve_path_against_base(subpath, cwd.as_path()).ok() - } + Some(subpath) => Some(AbsolutePathBuf::resolve_path_against_base( + subpath, + cwd.as_path(), + )), None => Some(cwd.clone()), } } @@ -1100,10 +1101,7 @@ fn default_read_only_subpaths_for_writable_root( protect_missing_dot_codex: bool, ) -> Vec { let mut subpaths: Vec = Vec::new(); - #[allow(clippy::expect_used)] - let top_level_git = writable_root - .join(".git") - .expect(".git is a valid relative path"); + let top_level_git = 
writable_root.join(".git"); // This applies to typical repos (directory .git), worktrees/submodules // (file .git with gitdir pointer), and bare repos when the gitdir is the // writable root itself. @@ -1119,8 +1117,7 @@ fn default_read_only_subpaths_for_writable_root( subpaths.push(top_level_git); } - #[allow(clippy::expect_used)] - let top_level_agents = writable_root.join(".agents").expect("valid relative path"); + let top_level_agents = writable_root.join(".agents"); if top_level_agents.as_path().is_dir() { subpaths.push(top_level_agents); } @@ -1129,8 +1126,7 @@ fn default_read_only_subpaths_for_writable_root( // default. For the workspace root itself, protect it even before the // directory exists so first-time creation still goes through the // protected-path approval flow. - #[allow(clippy::expect_used)] - let top_level_codex = writable_root.join(".codex").expect("valid relative path"); + let top_level_codex = writable_root.join(".codex"); if protect_missing_dot_codex || top_level_codex.as_path().is_dir() { subpaths.push(top_level_codex); } @@ -1227,16 +1223,7 @@ fn resolve_gitdir_from_file(dot_git: &AbsolutePathBuf) -> Option path, - Err(err) => { - error!( - "Failed to resolve gitdir path {gitdir_raw} from {path}: {err}", - path = dot_git.as_path().display() - ); - return None; - } - }; + let gitdir_path = AbsolutePathBuf::resolve_path_against_base(gitdir_raw, base); if !gitdir_path.as_path().exists() { error!( "Resolved gitdir path {path} does not exist.", @@ -1302,7 +1289,7 @@ mod tests { cwd.path().canonicalize().expect("canonicalize cwd"), ) .expect("absolute canonical root"); - let expected_dot_codex = expected_root.join(".codex").expect("expected .codex path"); + let expected_dot_codex = expected_root.join(".codex"); let policy = FileSystemSandboxPolicy::restricted(vec![FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -1329,7 +1316,7 @@ mod tests { cwd.path().canonicalize().expect("canonicalize cwd"), ) .expect("absolute canonical root"); 
- let explicit_dot_codex = expected_root.join(".codex").expect("expected .codex path"); + let explicit_dot_codex = expected_root.join(".codex"); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { @@ -1359,10 +1346,7 @@ mod tests { ); assert!( policy.can_write_path_with_cwd( - explicit_dot_codex - .join("config.toml") - .expect("config.toml") - .as_path(), + explicit_dot_codex.join("config.toml").as_path(), cwd.path() ) ); @@ -1451,7 +1435,7 @@ mod tests { let link_root = AbsolutePathBuf::from_absolute_path(&link_root).expect("absolute symlinked root"); - let link_blocked = link_root.join("blocked").expect("symlinked blocked path"); + let link_blocked = link_root.join("blocked"); let expected_root = AbsolutePathBuf::from_absolute_path( real_root.canonicalize().expect("canonicalize real root"), ) @@ -1632,16 +1616,12 @@ mod tests { let link_root = AbsolutePathBuf::from_absolute_path(&link_root).expect("absolute symlinked root"); - let link_private = link_root - .join("linked-private") - .expect("symlinked linked-private path"); + let link_private = link_root.join("linked-private"); let expected_root = AbsolutePathBuf::from_absolute_path( real_root.canonicalize().expect("canonicalize real root"), ) .expect("absolute canonical root"); - let expected_linked_private = expected_root - .join("linked-private") - .expect("expected linked-private path"); + let expected_linked_private = expected_root.join("linked-private"); let unexpected_decoy = AbsolutePathBuf::from_absolute_path(decoy.canonicalize().expect("canonicalize decoy")) .expect("absolute canonical decoy"); @@ -1686,16 +1666,12 @@ mod tests { let link_root = AbsolutePathBuf::from_absolute_path(&link_root).expect("absolute symlinked root"); - let link_private = link_root - .join("linked-private") - .expect("symlinked linked-private path"); + let link_private = link_root.join("linked-private"); let expected_root = AbsolutePathBuf::from_absolute_path( 
real_root.canonicalize().expect("canonicalize real root"), ) .expect("absolute canonical root"); - let expected_linked_private = expected_root - .join("linked-private") - .expect("expected linked-private path"); + let expected_linked_private = expected_root.join("linked-private"); let unexpected_decoy = AbsolutePathBuf::from_absolute_path(decoy.canonicalize().expect("canonicalize decoy")) .expect("absolute canonical decoy"); @@ -1735,14 +1711,12 @@ mod tests { symlink_dir(&root, &alias).expect("create alias symlink"); let root = AbsolutePathBuf::from_absolute_path(&root).expect("absolute root"); - let alias = root.join("alias-root").expect("alias root path"); + let alias = root.join("alias-root"); let expected_root = AbsolutePathBuf::from_absolute_path( root.as_path().canonicalize().expect("canonicalize root"), ) .expect("absolute canonical root"); - let expected_alias = expected_root - .join("alias-root") - .expect("expected alias path"); + let expected_alias = expected_root.join("alias-root"); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { @@ -1848,13 +1822,10 @@ mod tests { #[test] fn resolve_access_with_cwd_uses_most_specific_entry() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); - let docs_private = AbsolutePathBuf::resolve_path_against_base("docs/private", cwd.path()) - .expect("resolve docs/private"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); + let docs_private = AbsolutePathBuf::resolve_path_against_base("docs/private", cwd.path()); let docs_private_public = - AbsolutePathBuf::resolve_path_against_base("docs/private/public", cwd.path()) - .expect("resolve docs/private/public"); + AbsolutePathBuf::resolve_path_against_base("docs/private/public", cwd.path()); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -1901,8 +1872,7 @@ 
mod tests { #[test] fn split_only_nested_carveouts_need_direct_runtime_enforcement() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -1933,8 +1903,7 @@ mod tests { #[test] fn root_write_with_read_only_child_is_not_full_disk_write() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -1961,8 +1930,7 @@ mod tests { #[test] fn root_deny_does_not_materialize_as_unreadable_root() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let expected_docs = AbsolutePathBuf::from_absolute_path( cwd.path() .canonicalize() @@ -2025,8 +1993,7 @@ mod tests { #[test] fn same_specificity_write_override_keeps_full_disk_write_access() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs index 5e0c6010e4..1f199b149b 100644 --- a/codex-rs/protocol/src/protocol.rs +++ b/codex-rs/protocol/src/protocol.rs @@ -48,6 +48,8 @@ use crate::plan_tool::UpdatePlanArgs; use 
crate::request_permissions::RequestPermissionsEvent; use crate::request_permissions::RequestPermissionsResponse; use crate::request_user_input::RequestUserInputResponse; +use crate::serde_helpers::deserialize_double_option; +use crate::serde_helpers::serialize_double_option; use crate::user_input::UserInput; use codex_git_utils::GitSha; use codex_utils_absolute_path::AbsolutePathBuf; @@ -1095,10 +1097,7 @@ fn default_read_only_subpaths_for_writable_root( protect_missing_dot_codex: bool, ) -> Vec { let mut subpaths: Vec = Vec::new(); - #[allow(clippy::expect_used)] - let top_level_git = writable_root - .join(".git") - .expect(".git is a valid relative path"); + let top_level_git = writable_root.join(".git"); // This applies to typical repos (directory .git), worktrees/submodules // (file .git with gitdir pointer), and bare repos when the gitdir is the // writable root itself. @@ -1114,8 +1113,7 @@ fn default_read_only_subpaths_for_writable_root( subpaths.push(top_level_git); } - #[allow(clippy::expect_used)] - let top_level_agents = writable_root.join(".agents").expect("valid relative path"); + let top_level_agents = writable_root.join(".agents"); if top_level_agents.as_path().is_dir() { subpaths.push(top_level_agents); } @@ -1124,8 +1122,7 @@ fn default_read_only_subpaths_for_writable_root( // default. For the workspace root itself, protect it even before the // directory exists so first-time creation still goes through the // protected-path approval flow. 
- #[allow(clippy::expect_used)] - let top_level_codex = writable_root.join(".codex").expect("valid relative path"); + let top_level_codex = writable_root.join(".codex"); if protect_missing_dot_codex || top_level_codex.as_path().is_dir() { subpaths.push(top_level_codex); } @@ -1185,16 +1182,7 @@ fn resolve_gitdir_from_file(dot_git: &AbsolutePathBuf) -> Option path, - Err(err) => { - error!( - "Failed to resolve gitdir path {gitdir_raw} from {path}: {err}", - path = dot_git.as_path().display() - ); - return None; - } - }; + let gitdir_path = AbsolutePathBuf::resolve_path_against_base(gitdir_raw, base); if !gitdir_path.as_path().exists() { error!( "Resolved gitdir path {path} does not exist.", @@ -1864,11 +1852,23 @@ pub struct ContextCompactedEvent; pub struct TurnCompleteEvent { pub turn_id: String, pub last_agent_message: Option, + /// Unix timestamp (in seconds) when the turn completed. + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(type = "number | null", optional)] + pub completed_at: Option, + /// Duration between turn start and completion in milliseconds, if known. + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(type = "number | null", optional)] + pub duration_ms: Option, } #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)] pub struct TurnStartedEvent { pub turn_id: String, + /// Unix timestamp (in seconds) when the turn started. + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(type = "number | null", optional)] + pub started_at: Option, // TODO(aibrahim): make this not optional pub model_context_window: Option, #[serde(default)] @@ -2320,7 +2320,7 @@ impl InitialHistory { } } - pub fn get_base_instructions(&self) -> Option { + pub fn get_base_instructions(&self) -> Option> { // TODO: SessionMeta should (in theory) always be first in the history, so we can probably only check the first item? 
match self { InitialHistory::New => None, @@ -2337,6 +2337,26 @@ impl InitialHistory { } } + pub fn get_developer_instructions(&self) -> Option> { + match self { + InitialHistory::New => None, + InitialHistory::Resumed(resumed) => { + resumed.history.iter().find_map(|item| match item { + RolloutItem::SessionMeta(meta_line) => { + meta_line.meta.developer_instructions.clone() + } + _ => None, + }) + } + InitialHistory::Forked(items) => items.iter().find_map(|item| match item { + RolloutItem::SessionMeta(meta_line) => { + meta_line.meta.developer_instructions.clone() + } + _ => None, + }), + } + } + pub fn get_dynamic_tools(&self) -> Option> { match self { InitialHistory::New => None, @@ -2528,7 +2548,20 @@ pub struct SessionMeta { /// base_instructions for the session. This *should* always be present when creating a new session, /// but may be missing for older sessions. If not present, fall back to rendering the base_instructions /// from ModelsManager. - pub base_instructions: Option, + #[serde( + default, + deserialize_with = "deserialize_double_option", + serialize_with = "serialize_double_option", + skip_serializing_if = "Option::is_none" + )] + pub base_instructions: Option>, + #[serde( + default, + deserialize_with = "deserialize_double_option", + serialize_with = "serialize_double_option", + skip_serializing_if = "Option::is_none" + )] + pub developer_instructions: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub dynamic_tools: Option>, #[serde(skip_serializing_if = "Option::is_none")] @@ -2550,6 +2583,7 @@ impl Default for SessionMeta { agent_path: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, } @@ -3375,6 +3409,14 @@ pub struct Chunk { pub struct TurnAbortedEvent { pub turn_id: Option, pub reason: TurnAbortReason, + /// Unix timestamp (in seconds) when the turn was aborted. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(type = "number | null", optional)] + pub completed_at: Option, + /// Duration between turn start and abort in milliseconds, if known. + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(type = "number | null", optional)] + pub duration_ms: Option, } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema, TS)] @@ -3978,8 +4020,7 @@ mod tests { .last() .and_then(|path| AbsolutePathBuf::from_absolute_path(path).ok()) .expect("filesystem root"); - let blocked = AbsolutePathBuf::resolve_path_against_base("blocked", cwd.path()) - .expect("resolve blocked"); + let blocked = AbsolutePathBuf::resolve_path_against_base("blocked", cwd.path()); let expected_blocked = AbsolutePathBuf::from_absolute_path( cwd.path() .canonicalize() @@ -4030,8 +4071,7 @@ mod tests { let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd"); let cwd_absolute = AbsolutePathBuf::from_absolute_path(&canonical_cwd).expect("absolute tempdir"); - let secret = AbsolutePathBuf::resolve_path_against_base("secret", cwd.path()) - .expect("resolve unreadable path"); + let secret = AbsolutePathBuf::resolve_path_against_base("secret", cwd.path()); let expected_secret = AbsolutePathBuf::from_absolute_path(canonical_cwd.join("secret")) .expect("canonical secret"); let expected_agents = AbsolutePathBuf::from_absolute_path(canonical_cwd.join(".agents")) @@ -4096,10 +4136,8 @@ mod tests { fn restricted_file_system_policy_treats_read_entries_as_read_only_subpaths() { let cwd = TempDir::new().expect("tempdir"); let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); - let docs_public = AbsolutePathBuf::resolve_path_against_base("docs/public", cwd.path()) - .expect("resolve docs/public"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); + let docs_public = 
AbsolutePathBuf::resolve_path_against_base("docs/public", cwd.path()); let expected_docs = AbsolutePathBuf::from_absolute_path(canonical_cwd.join("docs")) .expect("canonical docs"); let expected_docs_public = @@ -4143,8 +4181,7 @@ mod tests { #[test] fn legacy_workspace_write_nested_readable_root_stays_writable() { let cwd = TempDir::new().expect("tempdir"); - let docs = - AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()).expect("resolve docs"); + let docs = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let canonical_cwd = cwd.path().canonicalize().expect("canonicalize cwd"); let expected_dot_codex = AbsolutePathBuf::from_absolute_path(canonical_cwd.join(".codex")) .expect("canonical .codex"); @@ -4201,12 +4238,9 @@ mod tests { #[test] fn legacy_sandbox_policy_semantics_survive_split_bridge() { let cwd = TempDir::new().expect("tempdir"); - let readable_root = AbsolutePathBuf::resolve_path_against_base("readable", cwd.path()) - .expect("resolve readable root"); - let writable_root = AbsolutePathBuf::resolve_path_against_base("writable", cwd.path()) - .expect("resolve writable root"); - let nested_readable_root = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()) - .expect("resolve nested readable root"); + let readable_root = AbsolutePathBuf::resolve_path_against_base("readable", cwd.path()); + let writable_root = AbsolutePathBuf::resolve_path_against_base("writable", cwd.path()); + let nested_readable_root = AbsolutePathBuf::resolve_path_against_base("docs", cwd.path()); let policies = [ SandboxPolicy::DangerFullAccess, SandboxPolicy::ExternalSandbox { @@ -4543,7 +4577,9 @@ mod tests { }))?; match event { - EventMsg::TurnAborted(TurnAbortedEvent { turn_id, reason }) => { + EventMsg::TurnAborted(TurnAbortedEvent { + turn_id, reason, .. 
+ }) => { assert_eq!(turn_id, None); assert_eq!(reason, TurnAbortReason::Interrupted); } diff --git a/codex-rs/protocol/src/serde_helpers.rs b/codex-rs/protocol/src/serde_helpers.rs new file mode 100644 index 0000000000..0e35ebdba7 --- /dev/null +++ b/codex-rs/protocol/src/serde_helpers.rs @@ -0,0 +1,23 @@ +use serde::Deserialize; +use serde::Deserializer; +use serde::Serialize; +use serde::Serializer; + +pub fn deserialize_double_option<'de, T, D>(deserializer: D) -> Result>, D::Error> +where + T: Deserialize<'de>, + D: Deserializer<'de>, +{ + serde_with::rust::double_option::deserialize(deserializer) +} + +pub fn serialize_double_option( + value: &Option>, + serializer: S, +) -> Result +where + T: Serialize, + S: Serializer, +{ + serde_with::rust::double_option::serialize(value, serializer) +} diff --git a/codex-rs/response-debug-context/src/lib.rs b/codex-rs/response-debug-context/src/lib.rs index ed5a6bdc6f..f7c90b7533 100644 --- a/codex-rs/response-debug-context/src/lib.rs +++ b/codex-rs/response-debug-context/src/lib.rs @@ -1,6 +1,6 @@ use base64::Engine; +use codex_api::ApiError; use codex_api::TransportError; -use codex_api::error::ApiError; const REQUEST_ID_HEADER: &str = "x-request-id"; const OAI_REQUEST_ID_HEADER: &str = "x-oai-request-id"; @@ -91,8 +91,8 @@ mod tests { use super::extract_response_debug_context; use super::telemetry_api_error_message; use super::telemetry_transport_error_message; + use codex_api::ApiError; use codex_api::TransportError; - use codex_api::error::ApiError; use http::HeaderMap; use http::HeaderValue; use http::StatusCode; diff --git a/codex-rs/rmcp-client/Cargo.toml b/codex-rs/rmcp-client/Cargo.toml index 4b20e9d6eb..aa5ab5eee6 100644 --- a/codex-rs/rmcp-client/Cargo.toml +++ b/codex-rs/rmcp-client/Cargo.toml @@ -14,6 +14,7 @@ axum = { workspace = true, default-features = false, features = [ "tokio", ] } codex-client = { workspace = true } +codex-config = { workspace = true } codex-keyring-store = { workspace = true } 
codex-protocol = { workspace = true } codex-utils-pty = { workspace = true } @@ -37,7 +38,6 @@ rmcp = { workspace = true, default-features = false, features = [ "transport-streamable-http-client-reqwest", "transport-streamable-http-server", ] } -schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha2 = { workspace = true } diff --git a/codex-rs/rmcp-client/src/auth_status.rs b/codex-rs/rmcp-client/src/auth_status.rs index f97898548c..0b3b3bf6a7 100644 --- a/codex-rs/rmcp-client/src/auth_status.rs +++ b/codex-rs/rmcp-client/src/auth_status.rs @@ -12,10 +12,10 @@ use reqwest::header::HeaderMap; use serde::Deserialize; use tracing::debug; -use crate::OAuthCredentialsStoreMode; use crate::oauth::has_oauth_tokens; use crate::utils::apply_default_headers; use crate::utils::build_default_headers; +use codex_config::types::OAuthCredentialsStoreMode; const DISCOVERY_TIMEOUT: Duration = Duration::from_secs(5); const OAUTH_DISCOVERY_HEADER: &str = "MCP-Protocol-Version"; diff --git a/codex-rs/rmcp-client/src/lib.rs b/codex-rs/rmcp-client/src/lib.rs index 627b9f2e71..65b9cf8e28 100644 --- a/codex-rs/rmcp-client/src/lib.rs +++ b/codex-rs/rmcp-client/src/lib.rs @@ -11,7 +11,6 @@ pub use auth_status::determine_streamable_http_auth_status; pub use auth_status::discover_streamable_http_oauth; pub use auth_status::supports_oauth_login; pub use codex_protocol::protocol::McpAuthStatus; -pub use oauth::OAuthCredentialsStoreMode; pub use oauth::StoredOAuthTokens; pub use oauth::WrappedOAuthTokenResponse; pub use oauth::delete_oauth_tokens; diff --git a/codex-rs/rmcp-client/src/oauth.rs b/codex-rs/rmcp-client/src/oauth.rs index cdb64ff151..ddabffe29f 100644 --- a/codex-rs/rmcp-client/src/oauth.rs +++ b/codex-rs/rmcp-client/src/oauth.rs @@ -19,6 +19,7 @@ use anyhow::Context; use anyhow::Error; use anyhow::Result; +use codex_config::types::OAuthCredentialsStoreMode; use oauth2::AccessToken; use 
oauth2::EmptyExtraTokenFields; use oauth2::RefreshToken; @@ -26,7 +27,6 @@ use oauth2::Scope; use oauth2::TokenResponse; use oauth2::basic::BasicTokenType; use rmcp::transport::auth::OAuthTokenResponse; -use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use serde_json::Value; @@ -63,21 +63,6 @@ pub struct StoredOAuthTokens { pub expires_at: Option, } -/// Determine where Codex should store and read MCP credentials. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[serde(rename_all = "lowercase")] -pub enum OAuthCredentialsStoreMode { - /// `Keyring` when available; otherwise, `File`. - /// Credentials stored in the keyring will only be readable by Codex unless the user explicitly grants access via OS-level keyring access. - #[default] - Auto, - /// CODEX_HOME/.credentials.json - /// This file will be readable to Codex and other applications running as the same user. - File, - /// Keyring when available, otherwise fail. - Keyring, -} - /// Wrap OAuthTokenResponse to allow for partial equality comparison. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct WrappedOAuthTokenResponse(pub OAuthTokenResponse); diff --git a/codex-rs/rmcp-client/src/perform_oauth_login.rs b/codex-rs/rmcp-client/src/perform_oauth_login.rs index 821fbf2750..5bdb315381 100644 --- a/codex-rs/rmcp-client/src/perform_oauth_login.rs +++ b/codex-rs/rmcp-client/src/perform_oauth_login.rs @@ -16,13 +16,13 @@ use tokio::sync::oneshot; use tokio::time::timeout; use urlencoding::decode; -use crate::OAuthCredentialsStoreMode; use crate::StoredOAuthTokens; use crate::WrappedOAuthTokenResponse; use crate::oauth::compute_expires_at_millis; use crate::save_oauth_tokens; use crate::utils::apply_default_headers; use crate::utils::build_default_headers; +use codex_config::types::OAuthCredentialsStoreMode; struct OauthHeaders { http_headers: Option>, diff --git a/codex-rs/rmcp-client/src/rmcp_client.rs b/codex-rs/rmcp-client/src/rmcp_client.rs index aa460c21b9..d3316a0491 100644 --- a/codex-rs/rmcp-client/src/rmcp_client.rs +++ b/codex-rs/rmcp-client/src/rmcp_client.rs @@ -69,13 +69,13 @@ use tracing::warn; use crate::load_oauth_tokens; use crate::logging_client_handler::LoggingClientHandler; -use crate::oauth::OAuthCredentialsStoreMode; use crate::oauth::OAuthPersistor; use crate::oauth::StoredOAuthTokens; use crate::program_resolver; use crate::utils::apply_default_headers; use crate::utils::build_default_headers; use crate::utils::create_env_for_mcp_server; +use codex_config::types::OAuthCredentialsStoreMode; const EVENT_STREAM_MIME_TYPE: &str = "text/event-stream"; const JSON_MIME_TYPE: &str = "application/json"; diff --git a/codex-rs/rmcp-client/tests/streamable_http_recovery.rs b/codex-rs/rmcp-client/tests/streamable_http_recovery.rs index 6a75582c97..c0525aafaf 100644 --- a/codex-rs/rmcp-client/tests/streamable_http_recovery.rs +++ b/codex-rs/rmcp-client/tests/streamable_http_recovery.rs @@ -3,9 +3,9 @@ use std::path::PathBuf; use std::time::Duration; use std::time::Instant; +use 
codex_config::types::OAuthCredentialsStoreMode; use codex_rmcp_client::ElicitationAction; use codex_rmcp_client::ElicitationResponse; -use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_rmcp_client::RmcpClient; use codex_utils_cargo_bin::CargoBinError; use futures::FutureExt as _; diff --git a/codex-rs/rollout/src/lib.rs b/codex-rs/rollout/src/lib.rs index 160792a390..6acf2c3be4 100644 --- a/codex-rs/rollout/src/lib.rs +++ b/codex-rs/rollout/src/lib.rs @@ -4,12 +4,12 @@ use std::sync::LazyLock; use codex_protocol::protocol::SessionSource; -pub mod config; -pub mod list; -pub mod metadata; -pub mod policy; -pub mod recorder; -pub mod session_index; +pub(crate) mod config; +pub(crate) mod list; +pub(crate) mod metadata; +pub(crate) mod policy; +pub(crate) mod recorder; +pub(crate) mod session_index; pub mod state_db; pub(crate) mod default_client { @@ -30,14 +30,28 @@ pub static INTERACTIVE_SESSION_SOURCES: LazyLock> = LazyLock: }); pub use codex_protocol::protocol::SessionMeta; +pub use config::Config; pub use config::RolloutConfig; pub use config::RolloutConfigView; +pub use list::Cursor; +pub use list::ThreadItem; +pub use list::ThreadListConfig; +pub use list::ThreadListLayout; +pub use list::ThreadSortKey; +pub use list::ThreadsPage; pub use list::find_archived_thread_path_by_id_str; pub use list::find_thread_path_by_id_str; #[deprecated(note = "use find_thread_path_by_id_str")] pub use list::find_thread_path_by_id_str as find_conversation_path_by_id_str; +pub use list::get_threads; +pub use list::get_threads_in_root; +pub use list::parse_cursor; +pub use list::read_head_for_summary; +pub use list::read_session_meta_line; pub use list::rollout_date_parts; +pub use metadata::builder_from_items; pub use policy::EventPersistenceMode; +pub use policy::should_persist_response_item_for_memories; pub use recorder::RolloutRecorder; pub use recorder::RolloutRecorderParams; pub use session_index::append_thread_name; diff --git a/codex-rs/rollout/src/metadata.rs 
b/codex-rs/rollout/src/metadata.rs index 51ebb5ef1e..e8a95839d7 100644 --- a/codex-rs/rollout/src/metadata.rs +++ b/codex-rs/rollout/src/metadata.rs @@ -137,7 +137,7 @@ pub(crate) async fn backfill_sessions( runtime: &codex_state::StateRuntime, config: &impl RolloutConfigView, ) { - let metric_client = codex_otel::metrics::global(); + let metric_client = codex_otel::global(); let timer = metric_client .as_ref() .and_then(|otel| otel.start_timer(DB_METRIC_BACKFILL_DURATION_MS, &[]).ok()); diff --git a/codex-rs/rollout/src/metadata_tests.rs b/codex-rs/rollout/src/metadata_tests.rs index 8a149313dc..fdea29206a 100644 --- a/codex-rs/rollout/src/metadata_tests.rs +++ b/codex-rs/rollout/src/metadata_tests.rs @@ -56,6 +56,7 @@ async fn extract_metadata_from_rollout_uses_session_meta() { agent_role: None, model_provider: Some("openai".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; @@ -107,6 +108,7 @@ async fn extract_metadata_from_rollout_returns_latest_memory_mode() { agent_role: None, model_provider: Some("openai".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; @@ -369,6 +371,7 @@ fn write_rollout_in_sessions_with_cwd( agent_role: None, model_provider: Some("test-provider".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }; diff --git a/codex-rs/rollout/src/recorder.rs b/codex-rs/rollout/src/recorder.rs index f39c38af60..b589ffac97 100644 --- a/codex-rs/rollout/src/recorder.rs +++ b/codex-rs/rollout/src/recorder.rs @@ -81,7 +81,8 @@ pub enum RolloutRecorderParams { conversation_id: ThreadId, forked_from_id: Option, source: SessionSource, - base_instructions: BaseInstructions, + base_instructions: Option, + developer_instructions: Option>, dynamic_tools: Vec, event_persistence_mode: EventPersistenceMode, }, @@ -110,7 +111,8 @@ impl RolloutRecorderParams { conversation_id: 
ThreadId, forked_from_id: Option, source: SessionSource, - base_instructions: BaseInstructions, + base_instructions: Option, + developer_instructions: Option>, dynamic_tools: Vec, event_persistence_mode: EventPersistenceMode, ) -> Self { @@ -119,6 +121,7 @@ impl RolloutRecorderParams { forked_from_id, source, base_instructions, + developer_instructions, dynamic_tools, event_persistence_mode, } @@ -380,6 +383,7 @@ impl RolloutRecorder { forked_from_id, source, base_instructions, + developer_instructions, dynamic_tools, event_persistence_mode, } => { @@ -409,6 +413,7 @@ impl RolloutRecorder { source, model_provider: Some(config.model_provider_id().to_string()), base_instructions: Some(base_instructions), + developer_instructions, dynamic_tools: if dynamic_tools.is_empty() { None } else { diff --git a/codex-rs/rollout/src/recorder_tests.rs b/codex-rs/rollout/src/recorder_tests.rs index 163c8a1ee8..44e8ecb8c8 100644 --- a/codex-rs/rollout/src/recorder_tests.rs +++ b/codex-rs/rollout/src/recorder_tests.rs @@ -73,7 +73,8 @@ async fn recorder_materializes_only_after_explicit_persist() -> std::io::Result< thread_id, /*forked_from_id*/ None, SessionSource::Exec, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), @@ -166,7 +167,8 @@ async fn metadata_irrelevant_events_touch_state_db_updated_at() -> std::io::Resu thread_id, /*forked_from_id*/ None, SessionSource::Cli, - BaseInstructions::default(), + Some(BaseInstructions::default()), + /*developer_instructions*/ None, Vec::new(), EventPersistenceMode::Limited, ), diff --git a/codex-rs/rollout/src/tests.rs b/codex-rs/rollout/src/tests.rs index e647a54cd4..4fbd58c14b 100644 --- a/codex-rs/rollout/src/tests.rs +++ b/codex-rs/rollout/src/tests.rs @@ -977,7 +977,7 @@ async fn test_get_thread_contents() { } #[tokio::test] -async fn test_base_instructions_missing_in_meta_defaults_to_null() { +async fn 
test_base_instructions_missing_in_meta_stays_missing() { let temp = TempDir::new().unwrap(); let home = temp.path(); @@ -1011,10 +1011,7 @@ async fn test_base_instructions_missing_in_meta_defaults_to_null() { .await .expect("session meta head"); let first = head.first().expect("first head entry"); - assert_eq!( - first.get("base_instructions"), - Some(&serde_json::Value::Null) - ); + assert_eq!(first.get("base_instructions"), None); } #[tokio::test] @@ -1143,6 +1140,7 @@ async fn test_updated_at_uses_file_mtime() -> Result<()> { agent_role: None, model_provider: Some("test-provider".into()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, diff --git a/codex-rs/sandboxing/src/bwrap.rs b/codex-rs/sandboxing/src/bwrap.rs index aa73af1d3e..01d53ebbe8 100644 --- a/codex-rs/sandboxing/src/bwrap.rs +++ b/codex-rs/sandboxing/src/bwrap.rs @@ -1,15 +1,33 @@ use codex_protocol::protocol::SandboxPolicy; use std::path::Path; use std::path::PathBuf; +use std::process::Command; +use std::process::Output; const SYSTEM_BWRAP_PROGRAM: &str = "bwrap"; +const MISSING_BWRAP_WARNING: &str = concat!( + "Codex could not find bubblewrap on PATH. ", + "Install bubblewrap with your OS package manager. ", + "See the sandbox prerequisites: ", + "https://developers.openai.com/codex/concepts/sandboxing#prerequisites. 
", + "Codex will use the vendored bubblewrap in the meantime.", +); +const USER_NAMESPACE_WARNING: &str = + "Codex's Linux sandbox uses bubblewrap and needs access to create user namespaces."; +const USER_NAMESPACE_FAILURES: [&str; 4] = [ + "loopback: Failed RTM_NEWADDR", + "loopback: Failed RTM_NEWLINK", + "setting up uid map: Permission denied", + "No permissions to create a new namespace", +]; pub fn system_bwrap_warning(sandbox_policy: &SandboxPolicy) -> Option { if !should_warn_about_system_bwrap(sandbox_policy) { return None; } - system_bwrap_warning_for_lookup(find_system_bwrap_in_path()) + let system_bwrap_path = find_system_bwrap_in_path(); + system_bwrap_warning_for_path(system_bwrap_path.as_deref()) } fn should_warn_about_system_bwrap(sandbox_policy: &SandboxPolicy) -> bool { @@ -19,14 +37,42 @@ fn should_warn_about_system_bwrap(sandbox_policy: &SandboxPolicy) -> bool { ) } -fn system_bwrap_warning_for_lookup(system_bwrap_path: Option) -> Option { - match system_bwrap_path { - Some(_) => None, - None => Some( - "Codex could not find system bubblewrap on PATH. Please install bubblewrap with your package manager. Codex will use the vendored bubblewrap in the meantime." 
- .to_string(), - ), +fn system_bwrap_warning_for_path(system_bwrap_path: Option<&Path>) -> Option { + let Some(system_bwrap_path) = system_bwrap_path else { + return Some(MISSING_BWRAP_WARNING.to_string()); + }; + + if !system_bwrap_has_user_namespace_access(system_bwrap_path) { + return Some(USER_NAMESPACE_WARNING.to_string()); } + + None +} + +fn system_bwrap_has_user_namespace_access(system_bwrap_path: &Path) -> bool { + let output = match Command::new(system_bwrap_path) + .args([ + "--unshare-user", + "--unshare-net", + "--ro-bind", + "/", + "/", + "/bin/true", + ]) + .output() + { + Ok(output) => output, + Err(_) => return true, + }; + + output.status.success() || !is_user_namespace_failure(&output) +} + +fn is_user_namespace_failure(output: &Output) -> bool { + let stderr = String::from_utf8_lossy(&output.stderr); + USER_NAMESPACE_FAILURES + .iter() + .any(|failure| stderr.contains(failure)) } pub fn find_system_bwrap_in_path() -> Option { diff --git a/codex-rs/sandboxing/src/bwrap_tests.rs b/codex-rs/sandboxing/src/bwrap_tests.rs index 43eddb39c4..b0303e2b78 100644 --- a/codex-rs/sandboxing/src/bwrap_tests.rs +++ b/codex-rs/sandboxing/src/bwrap_tests.rs @@ -6,30 +6,42 @@ use tempfile::tempdir; #[test] fn system_bwrap_warning_reports_missing_system_bwrap() { - let warning = system_bwrap_warning_for_lookup(/*system_bwrap_path*/ None) - .expect("missing system bwrap should emit a warning"); - - assert!(warning.contains("could not find system bubblewrap")); + assert_eq!( + system_bwrap_warning_for_path(/*system_bwrap_path*/ None), + Some(MISSING_BWRAP_WARNING.to_string()) + ); } #[test] -fn system_bwrap_warning_skips_too_old_system_bwrap() { +fn system_bwrap_warning_reports_user_namespace_failures() { + for failure in USER_NAMESPACE_FAILURES { + let fake_bwrap = write_fake_bwrap(&format!( + r#"#!/bin/sh +echo '{failure}' >&2 +exit 1 +"# + )); + let fake_bwrap_path: &Path = fake_bwrap.as_ref(); + + assert_eq!( + 
system_bwrap_warning_for_path(Some(fake_bwrap_path)), + Some(USER_NAMESPACE_WARNING.to_string()), + "{failure}", + ); + } +} + +#[test] +fn system_bwrap_warning_skips_unrelated_bwrap_failures() { let fake_bwrap = write_fake_bwrap( r#"#!/bin/sh -if [ "$1" = "--help" ]; then - echo 'usage: bwrap [OPTION...] COMMAND' - exit 0 -fi +echo 'bwrap: Unknown option --argv0' >&2 exit 1 "#, ); let fake_bwrap_path: &Path = fake_bwrap.as_ref(); - assert_eq!( - system_bwrap_warning_for_lookup(Some(fake_bwrap_path.to_path_buf())), - None, - "Do not warn even if bwrap does not support `--argv0`", - ); + assert_eq!(system_bwrap_warning_for_path(Some(fake_bwrap_path)), None); } #[test] @@ -102,5 +114,5 @@ fn write_named_fake_bwrap_in(dir: &Path) -> PathBuf { fs::write(&path, "#!/bin/sh\n").expect("write fake bwrap"); let permissions = fs::Permissions::from_mode(0o755); fs::set_permissions(&path, permissions).expect("chmod fake bwrap"); - path + fs::canonicalize(path).expect("canonicalize fake bwrap") } diff --git a/codex-rs/sandboxing/src/manager_tests.rs b/codex-rs/sandboxing/src/manager_tests.rs index c3a4b32e78..e4e0e03114 100644 --- a/codex-rs/sandboxing/src/manager_tests.rs +++ b/codex-rs/sandboxing/src/manager_tests.rs @@ -172,8 +172,8 @@ fn transform_additional_permissions_preserves_denied_entries() { canonicalize(temp_dir.path()).expect("canonicalize temp dir"), ) .expect("absolute temp dir"); - let allowed_path = workspace_root.join("allowed").expect("allowed path"); - let denied_path = workspace_root.join("denied").expect("denied path"); + let allowed_path = workspace_root.join("allowed"); + let denied_path = workspace_root.join("denied"); let exec_request = manager .transform( SandboxCommand { diff --git a/codex-rs/sandboxing/src/policy_transforms_tests.rs b/codex-rs/sandboxing/src/policy_transforms_tests.rs index d287bd243c..10f49d95f3 100644 --- a/codex-rs/sandboxing/src/policy_transforms_tests.rs +++ b/codex-rs/sandboxing/src/policy_transforms_tests.rs @@ -52,8 +52,7 @@ 
fn root_write_policy_with_carveouts_still_uses_platform_sandbox() { let blocked = AbsolutePathBuf::resolve_path_against_base( "blocked", std::env::current_dir().expect("current dir"), - ) - .expect("blocked path"); + ); let policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -317,8 +316,8 @@ fn merge_file_system_policy_with_additional_permissions_preserves_unreadable_roo canonicalize(temp_dir.path()).expect("canonicalize temp dir"), ) .expect("absolute temp dir"); - let allowed_path = cwd.join("allowed").expect("allowed path"); - let denied_path = cwd.join("denied").expect("denied path"); + let allowed_path = cwd.join("allowed"); + let denied_path = cwd.join("denied"); let merged_policy = merge_file_system_policy_with_additional_permissions( &FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { @@ -361,7 +360,7 @@ fn effective_file_system_sandbox_policy_returns_base_policy_without_additional_p canonicalize(temp_dir.path()).expect("canonicalize temp dir"), ) .expect("absolute temp dir"); - let denied_path = cwd.join("denied").expect("denied path"); + let denied_path = cwd.join("denied"); let base_policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { @@ -388,8 +387,8 @@ fn effective_file_system_sandbox_policy_merges_additional_write_roots() { canonicalize(temp_dir.path()).expect("canonicalize temp dir"), ) .expect("absolute temp dir"); - let allowed_path = cwd.join("allowed").expect("allowed path"); - let denied_path = cwd.join("denied").expect("denied path"); + let allowed_path = cwd.join("allowed"); + let denied_path = cwd.join("denied"); let base_policy = FileSystemSandboxPolicy::restricted(vec![ FileSystemSandboxEntry { path: FileSystemPath::Special { diff --git a/codex-rs/sandboxing/src/seatbelt_base_policy.sbpl b/codex-rs/sandboxing/src/seatbelt_base_policy.sbpl index 8d80277f6c..99f43e42e3 100644 --- 
a/codex-rs/sandboxing/src/seatbelt_base_policy.sbpl +++ b/codex-rs/sandboxing/src/seatbelt_base_policy.sbpl @@ -92,6 +92,12 @@ ; Needed for python multiprocessing on MacOS for the SemLock (allow ipc-posix-sem) +; Needed for PyTorch/libomp on macOS to register OpenMP runtimes. +(allow ipc-posix-shm-read-data + ipc-posix-shm-write-create + ipc-posix-shm-write-unlink + (ipc-posix-name-regex #"^/__KMP_REGISTERED_LIB_[0-9]+$")) + (allow mach-lookup (global-name "com.apple.PowerManagement.control") ) diff --git a/codex-rs/sandboxing/src/seatbelt_tests.rs b/codex-rs/sandboxing/src/seatbelt_tests.rs index 7a2a6d8b56..88d47db75c 100644 --- a/codex-rs/sandboxing/src/seatbelt_tests.rs +++ b/codex-rs/sandboxing/src/seatbelt_tests.rs @@ -60,6 +60,19 @@ fn base_policy_allows_node_cpu_sysctls() { ); } +#[test] +fn base_policy_allows_kmp_registration_shm_read_create_and_unlink() { + let expected = r##"(allow ipc-posix-shm-read-data + ipc-posix-shm-write-create + ipc-posix-shm-write-unlink + (ipc-posix-name-regex #"^/__KMP_REGISTERED_LIB_[0-9]+$"))"##; + + assert!( + MACOS_SEATBELT_BASE_POLICY.contains(expected), + "base policy must allow only KMP registration shm read/create/unlink:\n{MACOS_SEATBELT_BASE_POLICY}" + ); +} + #[test] fn create_seatbelt_args_routes_network_through_proxy_ports() { let policy = dynamic_network_policy( diff --git a/codex-rs/shell-command/src/command_safety/mod.rs b/codex-rs/shell-command/src/command_safety/mod.rs index 12e467bdbe..31fa64f8af 100644 --- a/codex-rs/shell-command/src/command_safety/mod.rs +++ b/codex-rs/shell-command/src/command_safety/mod.rs @@ -2,4 +2,4 @@ mod powershell_parser; pub mod is_dangerous_command; pub mod is_safe_command; -pub mod windows_safe_commands; +pub(crate) mod windows_safe_commands; diff --git a/codex-rs/shell-command/src/lib.rs b/codex-rs/shell-command/src/lib.rs index 215c30fd7d..1d9e302a4e 100644 --- a/codex-rs/shell-command/src/lib.rs +++ b/codex-rs/shell-command/src/lib.rs @@ -3,7 +3,7 @@ mod shell_detect; pub mod 
bash; -pub mod command_safety; +pub(crate) mod command_safety; pub mod parse_command; pub mod powershell; diff --git a/codex-rs/shell-command/src/powershell.rs b/codex-rs/shell-command/src/powershell.rs index 4d3b553333..d6ae79245b 100644 --- a/codex-rs/shell-command/src/powershell.rs +++ b/codex-rs/shell-command/src/powershell.rs @@ -112,10 +112,8 @@ pub fn try_find_pwsh_executable_blocking() -> Option { { let candidate = AbsolutePathBuf::resolve_path_against_base("pwsh.exe", &ps_home); - if let Ok(candidate_abs_path) = candidate - && is_powershellish_executable_available(candidate_abs_path.as_path()) - { - return Some(candidate_abs_path); + if is_powershellish_executable_available(candidate.as_path()) { + return Some(candidate); } } diff --git a/codex-rs/shell-escalation/src/lib.rs b/codex-rs/shell-escalation/src/lib.rs index edad193afd..6c7e1ec605 100644 --- a/codex-rs/shell-escalation/src/lib.rs +++ b/codex-rs/shell-escalation/src/lib.rs @@ -1,6 +1,8 @@ #[cfg(unix)] mod unix; +#[cfg(unix)] +pub use unix::ESCALATE_SOCKET_ENV_VAR; #[cfg(unix)] pub use unix::EscalateAction; #[cfg(unix)] @@ -28,8 +30,6 @@ pub use unix::ShellCommandExecutor; #[cfg(unix)] pub use unix::Stopwatch; #[cfg(unix)] -pub use unix::escalate_protocol::ESCALATE_SOCKET_ENV_VAR; -#[cfg(unix)] pub use unix::main_execve_wrapper; #[cfg(unix)] pub use unix::run_shell_escalation_execve_wrapper; diff --git a/codex-rs/shell-escalation/src/unix/escalate_server.rs b/codex-rs/shell-escalation/src/unix/escalate_server.rs index 34b8562160..2f31324ef0 100644 --- a/codex-rs/shell-escalation/src/unix/escalate_server.rs +++ b/codex-rs/shell-escalation/src/unix/escalate_server.rs @@ -274,7 +274,7 @@ async fn handle_escalate_session_with_policy( _ = parent_cancellation_token.cancelled() => return Ok(()), _ = session_cancellation_token.cancelled() => return Ok(()), }; - let program = AbsolutePathBuf::resolve_path_against_base(file, workdir.as_path())?; + let program = 
AbsolutePathBuf::resolve_path_against_base(file, workdir.as_path()); let decision = tokio::select! { decision = policy.determine_action(&program, &argv, &workdir) => { decision.context("failed to determine escalation action")? @@ -718,7 +718,7 @@ mod tests { let workdir = tmp.path().join("workspace"); std::fs::create_dir(&workdir)?; let workdir = AbsolutePathBuf::try_from(workdir)?; - let expected_file = workdir.join("bin/tool")?; + let expected_file = workdir.join("bin/tool"); let server_task = tokio::spawn(handle_escalate_session_with_policy( server, Arc::new(AssertingEscalationPolicy { diff --git a/codex-rs/shell-escalation/src/unix/mod.rs b/codex-rs/shell-escalation/src/unix/mod.rs index 13b7146ab4..b5d999411c 100644 --- a/codex-rs/shell-escalation/src/unix/mod.rs +++ b/codex-rs/shell-escalation/src/unix/mod.rs @@ -53,15 +53,16 @@ //! | | //! o<-----x //! -pub mod escalate_client; -pub mod escalate_protocol; -pub mod escalate_server; -pub mod escalation_policy; -pub mod execve_wrapper; -pub mod socket; -pub mod stopwatch; +pub(crate) mod escalate_client; +pub(crate) mod escalate_protocol; +pub(crate) mod escalate_server; +pub(crate) mod escalation_policy; +pub(crate) mod execve_wrapper; +pub(crate) mod socket; +pub(crate) mod stopwatch; pub use self::escalate_client::run_shell_escalation_execve_wrapper; +pub use self::escalate_protocol::ESCALATE_SOCKET_ENV_VAR; pub use self::escalate_protocol::EscalateAction; pub use self::escalate_protocol::EscalationDecision; pub use self::escalate_protocol::EscalationExecution; diff --git a/codex-rs/skills/src/lib.rs b/codex-rs/skills/src/lib.rs index 99cde06f6f..3ea802f20d 100644 --- a/codex-rs/skills/src/lib.rs +++ b/codex-rs/skills/src/lib.rs @@ -21,7 +21,7 @@ const SYSTEM_SKILLS_MARKER_SALT: &str = "v1"; /// This is typically located at `CODEX_HOME/skills/.system`. 
pub fn system_cache_root_dir(codex_home: &Path) -> PathBuf { AbsolutePathBuf::try_from(codex_home) - .and_then(|codex_home| system_cache_root_dir_abs(&codex_home)) + .map(|codex_home| system_cache_root_dir_abs(&codex_home)) .map(AbsolutePathBuf::into_path_buf) .unwrap_or_else(|_| { codex_home @@ -30,9 +30,9 @@ pub fn system_cache_root_dir(codex_home: &Path) -> PathBuf { }) } -fn system_cache_root_dir_abs(codex_home: &AbsolutePathBuf) -> std::io::Result { +fn system_cache_root_dir_abs(codex_home: &AbsolutePathBuf) -> AbsolutePathBuf { codex_home - .join(SKILLS_DIR_NAME)? + .join(SKILLS_DIR_NAME) .join(SYSTEM_SKILLS_DIR_NAME) } @@ -47,18 +47,13 @@ fn system_cache_root_dir_abs(codex_home: &AbsolutePathBuf) -> std::io::Result Result<(), SystemSkillsError> { let codex_home = AbsolutePathBuf::try_from(codex_home) .map_err(|source| SystemSkillsError::io("normalize codex home dir", source))?; - let skills_root_dir = codex_home - .join(SKILLS_DIR_NAME) - .map_err(|source| SystemSkillsError::io("resolve skills root dir", source))?; + let skills_root_dir = codex_home.join(SKILLS_DIR_NAME); fs::create_dir_all(skills_root_dir.as_path()) .map_err(|source| SystemSkillsError::io("create skills root dir", source))?; - let dest_system = system_cache_root_dir_abs(&codex_home) - .map_err(|source| SystemSkillsError::io("resolve system skills cache root dir", source))?; + let dest_system = system_cache_root_dir_abs(&codex_home); - let marker_path = dest_system - .join(SYSTEM_SKILLS_MARKER_FILENAME) - .map_err(|source| SystemSkillsError::io("resolve system skills marker path", source))?; + let marker_path = dest_system.join(SYSTEM_SKILLS_MARKER_FILENAME); let expected_fingerprint = embedded_system_skills_fingerprint(); if dest_system.as_path().is_dir() && read_marker(&marker_path).is_ok_and(|marker| marker == expected_fingerprint) @@ -127,18 +122,14 @@ fn write_embedded_dir(dir: &Dir<'_>, dest: &AbsolutePathBuf) -> Result<(), Syste for entry in dir.entries() { match entry { 
include_dir::DirEntry::Dir(subdir) => { - let subdir_dest = dest.join(subdir.path()).map_err(|source| { - SystemSkillsError::io("resolve system skills subdir", source) - })?; + let subdir_dest = dest.join(subdir.path()); fs::create_dir_all(subdir_dest.as_path()).map_err(|source| { SystemSkillsError::io("create system skills subdir", source) })?; write_embedded_dir(subdir, dest)?; } include_dir::DirEntry::File(file) => { - let path = dest.join(file.path()).map_err(|source| { - SystemSkillsError::io("resolve system skills file", source) - })?; + let path = dest.join(file.path()); if let Some(parent) = path.as_path().parent() { fs::create_dir_all(parent).map_err(|source| { SystemSkillsError::io("create system skills file parent", source) diff --git a/codex-rs/state/migrations/0024_remote_control_enrollments.sql b/codex-rs/state/migrations/0024_remote_control_enrollments.sql new file mode 100644 index 0000000000..970db9ef20 --- /dev/null +++ b/codex-rs/state/migrations/0024_remote_control_enrollments.sql @@ -0,0 +1,10 @@ +CREATE TABLE remote_control_enrollments ( + websocket_url TEXT NOT NULL, + account_id TEXT NOT NULL, + app_server_client_name TEXT NOT NULL, + server_id TEXT NOT NULL, + environment_id TEXT NOT NULL, + server_name TEXT NOT NULL, + updated_at INTEGER NOT NULL, + PRIMARY KEY (websocket_url, account_id, app_server_client_name) +); diff --git a/codex-rs/state/src/extract.rs b/codex-rs/state/src/extract.rs index 8d35d393a8..31545e3f6c 100644 --- a/codex-rs/state/src/extract.rs +++ b/codex-rs/state/src/extract.rs @@ -257,6 +257,7 @@ mod tests { agent_role: None, model_provider: Some("openai".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, @@ -384,6 +385,7 @@ mod tests { agent_role: None, model_provider: Some("openai".to_string()), base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, diff --git a/codex-rs/state/src/lib.rs 
b/codex-rs/state/src/lib.rs index ffaa1637e2..efad7651f5 100644 --- a/codex-rs/state/src/lib.rs +++ b/codex-rs/state/src/lib.rs @@ -46,6 +46,7 @@ pub use model::Stage1StartupClaimParams; pub use model::ThreadMetadata; pub use model::ThreadMetadataBuilder; pub use model::ThreadsPage; +pub use runtime::RemoteControlEnrollmentRecord; pub use runtime::logs_db_filename; pub use runtime::logs_db_path; pub use runtime::state_db_filename; diff --git a/codex-rs/state/src/migrations.rs b/codex-rs/state/src/migrations.rs index 6e7b9d363c..883129a943 100644 --- a/codex-rs/state/src/migrations.rs +++ b/codex-rs/state/src/migrations.rs @@ -1,4 +1,29 @@ +use std::borrow::Cow; + use sqlx::migrate::Migrator; pub(crate) static STATE_MIGRATOR: Migrator = sqlx::migrate!("./migrations"); pub(crate) static LOGS_MIGRATOR: Migrator = sqlx::migrate!("./logs_migrations"); + +/// Allow an older Codex binary to open a database that has already been +/// migrated by a newer binary running in parallel. +/// +/// We intentionally ignore applied migration versions that are newer than the +/// embedded migration set. Known migration versions are still validated by +/// checksum, so this only relaxes the "database is ahead of me" case. 
+fn runtime_migrator(base: &'static Migrator) -> Migrator { + Migrator { + migrations: Cow::Borrowed(base.migrations.as_ref()), + ignore_missing: true, + locking: base.locking, + no_tx: base.no_tx, + } +} + +pub(crate) fn runtime_state_migrator() -> Migrator { + runtime_migrator(&STATE_MIGRATOR) +} + +pub(crate) fn runtime_logs_migrator() -> Migrator { + runtime_migrator(&LOGS_MIGRATOR) +} diff --git a/codex-rs/state/src/runtime.rs b/codex-rs/state/src/runtime.rs index 4d3af301b2..f71b6adf05 100644 --- a/codex-rs/state/src/runtime.rs +++ b/codex-rs/state/src/runtime.rs @@ -17,8 +17,8 @@ use crate::ThreadMetadata; use crate::ThreadMetadataBuilder; use crate::ThreadsPage; use crate::apply_rollout_item; -use crate::migrations::LOGS_MIGRATOR; -use crate::migrations::STATE_MIGRATOR; +use crate::migrations::runtime_logs_migrator; +use crate::migrations::runtime_state_migrator; use crate::model::AgentJobRow; use crate::model::ThreadRow; use crate::model::anchor_from_item; @@ -54,10 +54,13 @@ mod agent_jobs; mod backfill; mod logs; mod memories; +mod remote_control; #[cfg(test)] mod test_support; mod threads; +pub use remote_control::RemoteControlEnrollmentRecord; + // "Partition" is the retained-log-content bucket we cap at 10 MiB: // - one bucket per non-null thread_id // - one bucket per threadless (thread_id IS NULL) non-null process_uuid @@ -83,6 +86,8 @@ impl StateRuntime { /// rest of the state store. 
pub async fn init(codex_home: PathBuf, default_provider: String) -> anyhow::Result> { tokio::fs::create_dir_all(&codex_home).await?; + let state_migrator = runtime_state_migrator(); + let logs_migrator = runtime_logs_migrator(); let current_state_name = state_db_filename(); let current_logs_name = logs_db_filename(); remove_legacy_db_files( @@ -101,14 +106,14 @@ impl StateRuntime { .await; let state_path = state_db_path(codex_home.as_path()); let logs_path = logs_db_path(codex_home.as_path()); - let pool = match open_state_sqlite(&state_path, &STATE_MIGRATOR).await { + let pool = match open_state_sqlite(&state_path, &state_migrator).await { Ok(db) => Arc::new(db), Err(err) => { warn!("failed to open state db at {}: {err}", state_path.display()); return Err(err); } }; - let logs_pool = match open_logs_sqlite(&logs_path, &LOGS_MIGRATOR).await { + let logs_pool = match open_logs_sqlite(&logs_path, &logs_migrator).await { Ok(db) => Arc::new(db), Err(err) => { warn!("failed to open logs db at {}: {err}", logs_path.display()); @@ -146,7 +151,7 @@ fn base_sqlite_options(path: &Path) -> SqliteConnectOptions { .log_statements(LevelFilter::Off) } -async fn open_state_sqlite(path: &Path, migrator: &'static Migrator) -> anyhow::Result { +async fn open_state_sqlite(path: &Path, migrator: &Migrator) -> anyhow::Result { let options = base_sqlite_options(path).auto_vacuum(SqliteAutoVacuum::Incremental); let pool = SqlitePoolOptions::new() .max_connections(5) @@ -172,7 +177,7 @@ async fn open_state_sqlite(path: &Path, migrator: &'static Migrator) -> anyhow:: Ok(pool) } -async fn open_logs_sqlite(path: &Path, migrator: &'static Migrator) -> anyhow::Result { +async fn open_logs_sqlite(path: &Path, migrator: &Migrator) -> anyhow::Result { let options = base_sqlite_options(path).auto_vacuum(SqliteAutoVacuum::Incremental); let pool = SqlitePoolOptions::new() .max_connections(5) @@ -268,3 +273,74 @@ fn should_remove_db_file(file_name: &str, current_name: &str, base_name: &str) - }; 
!version_suffix.is_empty() && version_suffix.chars().all(|ch| ch.is_ascii_digit()) } + +#[cfg(test)] +mod tests { + use super::open_state_sqlite; + use super::runtime_state_migrator; + use super::state_db_path; + use super::test_support::unique_temp_dir; + use crate::migrations::STATE_MIGRATOR; + use sqlx::SqlitePool; + use sqlx::migrate::MigrateError; + use sqlx::sqlite::SqliteConnectOptions; + use std::path::Path; + + async fn open_db_pool(path: &Path) -> SqlitePool { + SqlitePool::connect_with( + SqliteConnectOptions::new() + .filename(path) + .create_if_missing(false), + ) + .await + .expect("open sqlite pool") + } + + #[tokio::test] + async fn open_state_sqlite_tolerates_newer_applied_migrations() { + let codex_home = unique_temp_dir(); + tokio::fs::create_dir_all(&codex_home) + .await + .expect("create codex home"); + let state_path = state_db_path(codex_home.as_path()); + let pool = SqlitePool::connect_with( + SqliteConnectOptions::new() + .filename(&state_path) + .create_if_missing(true), + ) + .await + .expect("open state db"); + STATE_MIGRATOR + .run(&pool) + .await + .expect("apply current state schema"); + sqlx::query( + "INSERT INTO _sqlx_migrations (version, description, success, checksum, execution_time) VALUES (?, ?, ?, ?, ?)", + ) + .bind(9_999_i64) + .bind("future migration") + .bind(true) + .bind(vec![1_u8, 2, 3, 4]) + .bind(1_i64) + .execute(&pool) + .await + .expect("insert future migration record"); + pool.close().await; + + let strict_pool = open_db_pool(state_path.as_path()).await; + let strict_err = STATE_MIGRATOR + .run(&strict_pool) + .await + .expect_err("strict migrator should reject newer applied migrations"); + assert!(matches!(strict_err, MigrateError::VersionMissing(9_999))); + strict_pool.close().await; + + let tolerant_migrator = runtime_state_migrator(); + let tolerant_pool = open_state_sqlite(state_path.as_path(), &tolerant_migrator) + .await + .expect("runtime migrator should tolerate newer applied migrations"); + 
tolerant_pool.close().await; + + let _ = tokio::fs::remove_dir_all(codex_home).await; + } +} diff --git a/codex-rs/state/src/runtime/remote_control.rs b/codex-rs/state/src/runtime/remote_control.rs new file mode 100644 index 0000000000..fa0b1823f8 --- /dev/null +++ b/codex-rs/state/src/runtime/remote_control.rs @@ -0,0 +1,283 @@ +use super::*; + +const REMOTE_CONTROL_APP_SERVER_CLIENT_NAME_NONE: &str = ""; + +/// Persisted remote-control server enrollment, including the lookup key. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RemoteControlEnrollmentRecord { + pub websocket_url: String, + pub account_id: String, + pub app_server_client_name: Option, + pub server_id: String, + pub environment_id: String, + pub server_name: String, +} + +fn remote_control_app_server_client_name_key(app_server_client_name: Option<&str>) -> &str { + app_server_client_name.unwrap_or(REMOTE_CONTROL_APP_SERVER_CLIENT_NAME_NONE) +} + +fn app_server_client_name_from_key(app_server_client_name: String) -> Option { + if app_server_client_name.is_empty() { + None + } else { + Some(app_server_client_name) + } +} + +impl StateRuntime { + pub async fn get_remote_control_enrollment( + &self, + websocket_url: &str, + account_id: &str, + app_server_client_name: Option<&str>, + ) -> anyhow::Result> { + let row = sqlx::query( + r#" +SELECT websocket_url, account_id, app_server_client_name, server_id, environment_id, server_name +FROM remote_control_enrollments +WHERE websocket_url = ? AND account_id = ? AND app_server_client_name = ? 
+ "#, + ) + .bind(websocket_url) + .bind(account_id) + .bind(remote_control_app_server_client_name_key( + app_server_client_name, + )) + .fetch_optional(self.pool.as_ref()) + .await?; + + row.map(|row| { + let app_server_client_name: String = row.try_get("app_server_client_name")?; + Ok(RemoteControlEnrollmentRecord { + websocket_url: row.try_get("websocket_url")?, + account_id: row.try_get("account_id")?, + app_server_client_name: app_server_client_name_from_key(app_server_client_name), + server_id: row.try_get("server_id")?, + environment_id: row.try_get("environment_id")?, + server_name: row.try_get("server_name")?, + }) + }) + .transpose() + } + + pub async fn upsert_remote_control_enrollment( + &self, + enrollment: &RemoteControlEnrollmentRecord, + ) -> anyhow::Result<()> { + sqlx::query( + r#" +INSERT INTO remote_control_enrollments ( + websocket_url, + account_id, + app_server_client_name, + server_id, + environment_id, + server_name, + updated_at +) VALUES (?, ?, ?, ?, ?, ?, ?) +ON CONFLICT(websocket_url, account_id, app_server_client_name) DO UPDATE SET + server_id = excluded.server_id, + environment_id = excluded.environment_id, + server_name = excluded.server_name, + updated_at = excluded.updated_at + "#, + ) + .bind(&enrollment.websocket_url) + .bind(&enrollment.account_id) + .bind(remote_control_app_server_client_name_key( + enrollment.app_server_client_name.as_deref(), + )) + .bind(&enrollment.server_id) + .bind(&enrollment.environment_id) + .bind(&enrollment.server_name) + .bind(Utc::now().timestamp()) + .execute(self.pool.as_ref()) + .await?; + Ok(()) + } + + pub async fn delete_remote_control_enrollment( + &self, + websocket_url: &str, + account_id: &str, + app_server_client_name: Option<&str>, + ) -> anyhow::Result { + let result = sqlx::query( + r#" +DELETE FROM remote_control_enrollments +WHERE websocket_url = ? AND account_id = ? AND app_server_client_name = ? 
+ "#, + ) + .bind(websocket_url) + .bind(account_id) + .bind(remote_control_app_server_client_name_key( + app_server_client_name, + )) + .execute(self.pool.as_ref()) + .await?; + Ok(result.rows_affected()) + } +} + +#[cfg(test)] +mod tests { + use super::RemoteControlEnrollmentRecord; + use super::StateRuntime; + use super::test_support::unique_temp_dir; + use pretty_assertions::assert_eq; + + #[tokio::test] + async fn remote_control_enrollment_round_trips_by_target_and_account() { + let codex_home = unique_temp_dir(); + let runtime = StateRuntime::init(codex_home.clone(), "test-provider".to_string()) + .await + .expect("initialize runtime"); + + runtime + .upsert_remote_control_enrollment(&RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-a".to_string(), + app_server_client_name: Some("desktop-client".to_string()), + server_id: "srv_e_first".to_string(), + environment_id: "env_first".to_string(), + server_name: "first-server".to_string(), + }) + .await + .expect("insert first enrollment"); + runtime + .upsert_remote_control_enrollment(&RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-b".to_string(), + app_server_client_name: Some("desktop-client".to_string()), + server_id: "srv_e_second".to_string(), + environment_id: "env_second".to_string(), + server_name: "second-server".to_string(), + }) + .await + .expect("insert second enrollment"); + + assert_eq!( + runtime + .get_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-a", + Some("desktop-client"), + ) + .await + .expect("load first enrollment"), + Some(RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-a".to_string(), + app_server_client_name: 
Some("desktop-client".to_string()), + server_id: "srv_e_first".to_string(), + environment_id: "env_first".to_string(), + server_name: "first-server".to_string(), + }) + ); + assert_eq!( + runtime + .get_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-missing", + Some("desktop-client"), + ) + .await + .expect("load missing enrollment"), + None + ); + assert_eq!( + runtime + .get_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-a", + Some("other-client"), + ) + .await + .expect("load wrong client enrollment"), + None + ); + + let _ = tokio::fs::remove_dir_all(codex_home).await; + } + + #[tokio::test] + async fn delete_remote_control_enrollment_removes_only_matching_entry() { + let codex_home = unique_temp_dir(); + let runtime = StateRuntime::init(codex_home.clone(), "test-provider".to_string()) + .await + .expect("initialize runtime"); + + runtime + .upsert_remote_control_enrollment(&RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-a".to_string(), + app_server_client_name: None, + server_id: "srv_e_first".to_string(), + environment_id: "env_first".to_string(), + server_name: "first-server".to_string(), + }) + .await + .expect("insert first enrollment"); + runtime + .upsert_remote_control_enrollment(&RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-b".to_string(), + app_server_client_name: None, + server_id: "srv_e_second".to_string(), + environment_id: "env_second".to_string(), + server_name: "second-server".to_string(), + }) + .await + .expect("insert second enrollment"); + + assert_eq!( + runtime + .delete_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-a", + /*app_server_client_name*/ None, + ) + .await + 
.expect("delete first enrollment"), + 1 + ); + assert_eq!( + runtime + .get_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-a", + /*app_server_client_name*/ None, + ) + .await + .expect("load deleted enrollment"), + None + ); + assert_eq!( + runtime + .get_remote_control_enrollment( + "wss://example.com/backend-api/wham/remote/control/server", + "account-b", + /*app_server_client_name*/ None, + ) + .await + .expect("load retained enrollment"), + Some(RemoteControlEnrollmentRecord { + websocket_url: "wss://example.com/backend-api/wham/remote/control/server" + .to_string(), + account_id: "account-b".to_string(), + app_server_client_name: None, + server_id: "srv_e_second".to_string(), + environment_id: "env_second".to_string(), + server_name: "second-server".to_string(), + }) + ); + + let _ = tokio::fs::remove_dir_all(codex_home).await; + } +} diff --git a/codex-rs/state/src/runtime/threads.rs b/codex-rs/state/src/runtime/threads.rs index 09b23a4319..06fd9a7b42 100644 --- a/codex-rs/state/src/runtime/threads.rs +++ b/codex-rs/state/src/runtime/threads.rs @@ -1030,6 +1030,7 @@ mod tests { agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: Some("polluted".to_string()), }, @@ -1088,6 +1089,7 @@ mod tests { agent_role: None, model_provider: None, base_instructions: None, + developer_instructions: None, dynamic_tools: None, memory_mode: None, }, diff --git a/codex-rs/tools/src/agent_tool.rs b/codex-rs/tools/src/agent_tool.rs index 017cc6e5ee..1a1eeb19dc 100644 --- a/codex-rs/tools/src/agent_tool.rs +++ b/codex-rs/tools/src/agent_tool.rs @@ -10,6 +10,7 @@ use std::collections::BTreeMap; pub struct SpawnAgentToolOptions<'a> { pub available_models: &'a [ModelPreset], pub agent_type_description: String, + pub hide_agent_type_model_reasoning: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -20,15 +21,19 @@ pub struct WaitAgentTimeoutOptions 
{ } pub fn create_spawn_agent_tool_v1(options: SpawnAgentToolOptions<'_>) -> ToolSpec { - let available_models_description = spawn_agent_models_description(options.available_models); + let available_models_description = (!options.hide_agent_type_model_reasoning) + .then(|| spawn_agent_models_description(options.available_models)); let return_value_description = "Returns the spawned agent id plus the user-facing nickname when available."; - let properties = spawn_agent_common_properties_v1(&options.agent_type_description); + let mut properties = spawn_agent_common_properties_v1(&options.agent_type_description); + if options.hide_agent_type_model_reasoning { + hide_spawn_agent_metadata_options(&mut properties); + } ToolSpec::Function(ResponsesApiTool { name: "spawn_agent".to_string(), description: spawn_agent_tool_description( - &available_models_description, + available_models_description.as_deref(), return_value_description, ), strict: false, @@ -43,9 +48,17 @@ pub fn create_spawn_agent_tool_v1(options: SpawnAgentToolOptions<'_>) -> ToolSpe } pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpec { - let available_models_description = spawn_agent_models_description(options.available_models); - let return_value_description = "Returns the canonical task name for the spawned agent, plus the user-facing nickname when available."; + let available_models_description = (!options.hide_agent_type_model_reasoning) + .then(|| spawn_agent_models_description(options.available_models)); + let return_value_description = if options.hide_agent_type_model_reasoning { + "Returns the canonical task name for the spawned agent." + } else { + "Returns the canonical task name for the spawned agent, plus the user-facing nickname when available." 
+ }; let mut properties = spawn_agent_common_properties_v2(&options.agent_type_description); + if options.hide_agent_type_model_reasoning { + hide_spawn_agent_metadata_options(&mut properties); + } properties.insert( "task_name".to_string(), JsonSchema::String { @@ -59,7 +72,7 @@ pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpe ToolSpec::Function(ResponsesApiTool { name: "spawn_agent".to_string(), description: spawn_agent_tool_description( - &available_models_description, + available_models_description.as_deref(), return_value_description, ), strict: false, @@ -69,7 +82,9 @@ pub fn create_spawn_agent_tool_v2(options: SpawnAgentToolOptions<'_>) -> ToolSpe required: Some(vec!["task_name".to_string(), "message".to_string()]), additional_properties: Some(false.into()), }, - output_schema: Some(spawn_agent_output_schema_v2()), + output_schema: Some(spawn_agent_output_schema_v2( + options.hide_agent_type_model_reasoning, + )), }) } @@ -146,7 +161,7 @@ pub fn create_send_message_tool() -> ToolSpec { required: Some(vec!["target".to_string(), "message".to_string()]), additional_properties: Some(false.into()), }, - output_schema: Some(send_input_output_schema()), + output_schema: None, }) } @@ -188,7 +203,7 @@ pub fn create_followup_task_tool() -> ToolSpec { required: Some(vec!["target".to_string(), "message".to_string()]), additional_properties: Some(false.into()), }, - output_schema: Some(send_input_output_schema()), + output_schema: None, }) } @@ -362,14 +377,24 @@ fn spawn_agent_output_schema_v1() -> Value { }) } -fn spawn_agent_output_schema_v2() -> Value { +fn spawn_agent_output_schema_v2(hide_agent_metadata: bool) -> Value { + if hide_agent_metadata { + return json!({ + "type": "object", + "properties": { + "task_name": { + "type": "string", + "description": "Canonical task name for the spawned agent." 
+ } + }, + "required": ["task_name"], + "additionalProperties": false + }); + } + json!({ "type": "object", "properties": { - "agent_id": { - "type": ["string", "null"], - "description": "Legacy thread identifier for the spawned agent." - }, "task_name": { "type": "string", "description": "Canonical task name for the spawned agent." @@ -379,7 +404,7 @@ fn spawn_agent_output_schema_v2() -> Value { "description": "User-facing nickname for the spawned agent when available." } }, - "required": ["agent_id", "task_name", "nickname"], + "required": ["task_name", "nickname"], "additionalProperties": false }) } @@ -637,18 +662,30 @@ fn spawn_agent_common_properties_v2(agent_type_description: &str) -> BTreeMap) { + properties.remove("agent_type"); + properties.remove("model"); + properties.remove("reasoning_effort"); +} + fn spawn_agent_tool_description( - available_models_description: &str, + available_models_description: Option<&str>, return_value_description: &str, ) -> String { + let agent_role_guidance = available_models_description + .map(|description| { + format!( + "Agent-role guidance below only helps choose which agent to use after spawning is already authorized; it never authorizes spawning by itself.\n{description}" + ) + }) + .unwrap_or_default(); format!( r#" Only use `spawn_agent` if and only if the user explicitly asks for sub-agents, delegation, or parallel agent work. Requests for depth, thoroughness, research, investigation, or detailed codebase analysis do not count as permission to spawn. - Agent-role guidance below only helps choose which agent to use after spawning is already authorized; it never authorizes spawning by itself. + {agent_role_guidance} Spawn a sub-agent for a well-scoped task. {return_value_description} This spawn_agent tool provides you access to smaller but more efficient sub-agents. A mini model can solve many tasks faster than the main model. You should follow the rules and guidelines below to use this tool. 
-{available_models_description} ### When to delegate vs. do the subtask yourself - First, quickly analyze the overall user task and form a succinct high-level plan. Identify which tasks are immediate blockers on the critical path, and which tasks are sidecar tasks that are needed but can run in parallel without blocking the next local step. As part of that plan, explicitly decide what immediate task you should do locally right now. Do this planning step before delegating to agents so you do not hand off the immediate blocking task to a submodel and then waste time waiting on it. - Use the smaller subagent when a subtask is easy enough for it to handle and can run in parallel with your local work. Prefer delegating concrete, bounded sidecar tasks that materially advance the main task without blocking your immediate next local step. diff --git a/codex-rs/tools/src/agent_tool_tests.rs b/codex-rs/tools/src/agent_tool_tests.rs index cd68fbd13a..fc1de7bc76 100644 --- a/codex-rs/tools/src/agent_tool_tests.rs +++ b/codex-rs/tools/src/agent_tool_tests.rs @@ -34,6 +34,7 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() { model_preset("hidden", /*show_in_picker*/ false), ], agent_type_description: "role help".to_string(), + hide_agent_type_model_reasoning: false, }); let ToolSpec::Function(ResponsesApiTool { @@ -72,7 +73,7 @@ fn spawn_agent_tool_v2_requires_task_name_and_lists_visible_models() { ); assert_eq!( output_schema.expect("spawn_agent output schema")["required"], - json!(["agent_id", "task_name", "nickname"]) + json!(["task_name", "nickname"]) ); } @@ -81,6 +82,7 @@ fn spawn_agent_tool_v1_keeps_legacy_fork_context_field() { let tool = create_spawn_agent_tool_v1(SpawnAgentToolOptions { available_models: &[], agent_type_description: "role help".to_string(), + hide_agent_type_model_reasoning: false, }); let ToolSpec::Function(ResponsesApiTool { parameters, .. 
}) = tool else { @@ -95,7 +97,7 @@ fn spawn_agent_tool_v1_keeps_legacy_fork_context_field() { } #[test] -fn send_message_tool_requires_message_and_uses_submission_output() { +fn send_message_tool_requires_message_and_has_no_output_schema() { let ToolSpec::Function(ResponsesApiTool { parameters, output_schema, @@ -120,14 +122,11 @@ fn send_message_tool_requires_message_and_uses_submission_output() { required, Some(vec!["target".to_string(), "message".to_string()]) ); - assert_eq!( - output_schema.expect("send_message output schema")["required"], - json!(["submission_id"]) - ); + assert_eq!(output_schema, None); } #[test] -fn followup_task_tool_requires_message_and_uses_submission_output() { +fn followup_task_tool_requires_message_and_has_no_output_schema() { let ToolSpec::Function(ResponsesApiTool { parameters, output_schema, @@ -152,10 +151,7 @@ fn followup_task_tool_requires_message_and_uses_submission_output() { required, Some(vec!["target".to_string(), "message".to_string()]) ); - assert_eq!( - output_schema.expect("followup_task output schema")["required"], - json!(["submission_id"]) - ); + assert_eq!(output_schema, None); } #[test] diff --git a/codex-rs/tools/src/tool_config.rs b/codex-rs/tools/src/tool_config.rs index f48f1b8fcc..1b55d2c4f4 100644 --- a/codex-rs/tools/src/tool_config.rs +++ b/codex-rs/tools/src/tool_config.rs @@ -86,6 +86,7 @@ pub struct ToolsConfig { pub shell_type: ConfigShellToolType, pub shell_command_backend: ShellCommandBackendConfig, pub unified_exec_shell_mode: UnifiedExecShellMode, + pub has_environment: bool, pub allow_login_shell: bool, pub apply_patch_tool_type: Option, pub web_search_mode: Option, @@ -103,7 +104,7 @@ pub struct ToolsConfig { pub can_request_original_image_detail: bool, pub collab_tools: bool, pub multi_agent_v2: bool, - pub request_user_input: bool, + pub hide_spawn_agent_metadata: bool, pub default_mode_request_user_input: bool, pub experimental_supported_tools: Vec, pub agent_jobs_tools: bool, @@ -140,10 
+141,10 @@ impl ToolsConfig { include_js_repl && features.enabled(Feature::JsReplToolsOnly); let include_collab_tools = features.enabled(Feature::Collab); let include_multi_agent_v2 = features.enabled(Feature::MultiAgentV2); + let hide_spawn_agent_metadata = features.enabled(Feature::DebugHideSpawnAgentMetadata); let include_agent_jobs = features.enabled(Feature::SpawnCsv); - let include_request_user_input = !matches!(session_source, SessionSource::SubAgent(_)); let include_default_mode_request_user_input = - include_request_user_input && features.enabled(Feature::DefaultModeRequestUserInput); + features.enabled(Feature::DefaultModeRequestUserInput); let include_search_tool = model_info.supports_search_tool && features.enabled(Feature::ToolSearch); let include_tool_suggest = features.enabled(Feature::ToolSuggest) @@ -200,6 +201,7 @@ impl ToolsConfig { shell_type, shell_command_backend, unified_exec_shell_mode: UnifiedExecShellMode::Direct, + has_environment: true, allow_login_shell: true, apply_patch_tool_type, web_search_mode: *web_search_mode, @@ -217,7 +219,7 @@ impl ToolsConfig { can_request_original_image_detail: include_original_image_detail, collab_tools: include_collab_tools, multi_agent_v2: include_multi_agent_v2, - request_user_input: include_request_user_input, + hide_spawn_agent_metadata, default_mode_request_user_input: include_default_mode_request_user_input, experimental_supported_tools: model_info.experimental_supported_tools.clone(), agent_jobs_tools: include_agent_jobs, @@ -236,6 +238,11 @@ impl ToolsConfig { self } + pub fn with_has_environment(mut self, has_environment: bool) -> Self { + self.has_environment = has_environment; + self + } + pub fn with_unified_exec_shell_mode( mut self, unified_exec_shell_mode: UnifiedExecShellMode, diff --git a/codex-rs/tools/src/tool_config_tests.rs b/codex-rs/tools/src/tool_config_tests.rs index 168c8e80a0..5f121ff020 100644 --- a/codex-rs/tools/src/tool_config_tests.rs +++ 
b/codex-rs/tools/src/tool_config_tests.rs @@ -131,9 +131,10 @@ fn shell_zsh_fork_prefers_shell_command_over_unified_exec() { } #[test] -fn subagents_disable_request_user_input_and_agent_jobs_workers_opt_in_by_label() { +fn subagents_keep_request_user_input_mode_config_and_agent_jobs_workers_opt_in_by_label() { let model_info = model_info(); let mut features = Features::with_defaults(); + features.enable(Feature::DefaultModeRequestUserInput); features.enable(Feature::SpawnCsv); let available_models = Vec::new(); @@ -149,8 +150,7 @@ fn subagents_disable_request_user_input_and_agent_jobs_workers_opt_in_by_label() windows_sandbox_level: WindowsSandboxLevel::Disabled, }); - assert!(!tools_config.request_user_input); - assert!(!tools_config.default_mode_request_user_input); + assert!(tools_config.default_mode_request_user_input); assert!(tools_config.agent_jobs_tools); assert!(tools_config.agent_jobs_worker_tools); } diff --git a/codex-rs/tools/src/tool_registry_plan.rs b/codex-rs/tools/src/tool_registry_plan.rs index cc706f24fc..dc76c12d36 100644 --- a/codex-rs/tools/src/tool_registry_plan.rs +++ b/codex-rs/tools/src/tool_registry_plan.rs @@ -107,54 +107,56 @@ pub fn build_tool_registry_plan( ); } - match &config.shell_type { - ConfigShellToolType::Default => { - plan.push_spec( - create_shell_tool(ShellToolOptions { - exec_permission_approvals_enabled, - }), - /*supports_parallel_tool_calls*/ true, - config.code_mode_enabled, - ); - } - ConfigShellToolType::Local => { - plan.push_spec( - create_local_shell_tool(), - /*supports_parallel_tool_calls*/ true, - config.code_mode_enabled, - ); - } - ConfigShellToolType::UnifiedExec => { - plan.push_spec( - create_exec_command_tool(CommandToolOptions { - allow_login_shell: config.allow_login_shell, - exec_permission_approvals_enabled, - }), - /*supports_parallel_tool_calls*/ true, - config.code_mode_enabled, - ); - plan.push_spec( - create_write_stdin_tool(), - /*supports_parallel_tool_calls*/ false, - 
config.code_mode_enabled, - ); - plan.register_handler("exec_command", ToolHandlerKind::UnifiedExec); - plan.register_handler("write_stdin", ToolHandlerKind::UnifiedExec); - } - ConfigShellToolType::Disabled => {} - ConfigShellToolType::ShellCommand => { - plan.push_spec( - create_shell_command_tool(CommandToolOptions { - allow_login_shell: config.allow_login_shell, - exec_permission_approvals_enabled, - }), - /*supports_parallel_tool_calls*/ true, - config.code_mode_enabled, - ); + if config.has_environment { + match &config.shell_type { + ConfigShellToolType::Default => { + plan.push_spec( + create_shell_tool(ShellToolOptions { + exec_permission_approvals_enabled, + }), + /*supports_parallel_tool_calls*/ true, + config.code_mode_enabled, + ); + } + ConfigShellToolType::Local => { + plan.push_spec( + create_local_shell_tool(), + /*supports_parallel_tool_calls*/ true, + config.code_mode_enabled, + ); + } + ConfigShellToolType::UnifiedExec => { + plan.push_spec( + create_exec_command_tool(CommandToolOptions { + allow_login_shell: config.allow_login_shell, + exec_permission_approvals_enabled, + }), + /*supports_parallel_tool_calls*/ true, + config.code_mode_enabled, + ); + plan.push_spec( + create_write_stdin_tool(), + /*supports_parallel_tool_calls*/ false, + config.code_mode_enabled, + ); + plan.register_handler("exec_command", ToolHandlerKind::UnifiedExec); + plan.register_handler("write_stdin", ToolHandlerKind::UnifiedExec); + } + ConfigShellToolType::Disabled => {} + ConfigShellToolType::ShellCommand => { + plan.push_spec( + create_shell_command_tool(CommandToolOptions { + allow_login_shell: config.allow_login_shell, + exec_permission_approvals_enabled, + }), + /*supports_parallel_tool_calls*/ true, + config.code_mode_enabled, + ); + } } } - if config.shell_type != ConfigShellToolType::Disabled { + if config.has_environment && config.shell_type != ConfigShellToolType::Disabled { plan.register_handler("shell", ToolHandlerKind::Shell); 
plan.register_handler("container.exec", ToolHandlerKind::Shell); plan.register_handler("local_shell", ToolHandlerKind::Shell); @@ -189,7 +191,7 @@ pub fn build_tool_registry_plan( ); plan.register_handler("update_plan", ToolHandlerKind::Plan); - if config.js_repl_enabled { + if config.has_environment && config.js_repl_enabled { plan.push_spec( create_js_repl_tool(), /*supports_parallel_tool_calls*/ false, @@ -204,19 +206,17 @@ pub fn build_tool_registry_plan( plan.register_handler("js_repl_reset", ToolHandlerKind::JsReplReset); } - if config.request_user_input { - plan.push_spec( - create_request_user_input_tool(request_user_input_tool_description( - config.default_mode_request_user_input, - )), - /*supports_parallel_tool_calls*/ false, - config.code_mode_enabled, - ); - plan.register_handler( - REQUEST_USER_INPUT_TOOL_NAME, - ToolHandlerKind::RequestUserInput, - ); - } + plan.push_spec( + create_request_user_input_tool(request_user_input_tool_description( + config.default_mode_request_user_input, + )), + /*supports_parallel_tool_calls*/ false, + config.code_mode_enabled, + ); + plan.register_handler( + REQUEST_USER_INPUT_TOOL_NAME, + ToolHandlerKind::RequestUserInput, + ); if config.request_permissions_tool_enabled { plan.push_spec( @@ -265,7 +265,9 @@ pub fn build_tool_registry_plan( plan.register_handler(TOOL_SUGGEST_TOOL_NAME, ToolHandlerKind::ToolSuggest); } - if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type { + if config.has_environment + && let Some(apply_patch_tool_type) = &config.apply_patch_tool_type + { match apply_patch_tool_type { ApplyPatchToolType::Freeform => { plan.push_spec( @@ -285,10 +287,11 @@ pub fn build_tool_registry_plan( plan.register_handler("apply_patch", ToolHandlerKind::ApplyPatch); } - if config - .experimental_supported_tools - .iter() - .any(|tool| tool == "list_dir") + if config.has_environment + && config + .experimental_supported_tools + .iter() + .any(|tool| tool == "list_dir") { plan.push_spec( 
create_list_dir_tool(), @@ -331,14 +334,16 @@ pub fn build_tool_registry_plan( ); } - plan.push_spec( - create_view_image_tool(ViewImageToolOptions { - can_request_original_image_detail: config.can_request_original_image_detail, - }), - /*supports_parallel_tool_calls*/ true, - config.code_mode_enabled, - ); - plan.register_handler("view_image", ToolHandlerKind::ViewImage); + if config.has_environment { + plan.push_spec( + create_view_image_tool(ViewImageToolOptions { + can_request_original_image_detail: config.can_request_original_image_detail, + }), + /*supports_parallel_tool_calls*/ true, + config.code_mode_enabled, + ); + plan.register_handler("view_image", ToolHandlerKind::ViewImage); + } if config.collab_tools { if config.multi_agent_v2 { @@ -348,6 +353,7 @@ pub fn build_tool_registry_plan( create_spawn_agent_tool_v2(SpawnAgentToolOptions { available_models: &config.available_models, agent_type_description, + hide_agent_type_model_reasoning: config.hide_spawn_agent_metadata, }), /*supports_parallel_tool_calls*/ false, config.code_mode_enabled, @@ -390,6 +396,7 @@ pub fn build_tool_registry_plan( create_spawn_agent_tool_v1(SpawnAgentToolOptions { available_models: &config.available_models, agent_type_description, + hide_agent_type_model_reasoning: config.hide_spawn_agent_metadata, }), /*supports_parallel_tool_calls*/ false, config.code_mode_enabled, diff --git a/codex-rs/tools/src/tool_registry_plan_tests.rs b/codex-rs/tools/src/tool_registry_plan_tests.rs index fd7ab10073..d8d264e2e8 100644 --- a/codex-rs/tools/src/tool_registry_plan_tests.rs +++ b/codex-rs/tools/src/tool_registry_plan_tests.rs @@ -243,15 +243,18 @@ fn test_build_specs_multi_agent_v2_uses_task_names_and_hides_resume() { let output_schema = output_schema .as_ref() .expect("spawn_agent should define output schema"); - assert_eq!( - output_schema["required"], - json!(["agent_id", "task_name", "nickname"]) - ); + assert_eq!(output_schema["required"], json!(["task_name", "nickname"])); let 
send_message = find_tool(&tools, "send_message"); - let ToolSpec::Function(ResponsesApiTool { parameters, .. }) = &send_message.spec else { + let ToolSpec::Function(ResponsesApiTool { + parameters, + output_schema, + .. + }) = &send_message.spec + else { panic!("send_message should be a function tool"); }; + assert_eq!(output_schema, &None); let JsonSchema::Object { properties, required, @@ -270,9 +273,15 @@ fn test_build_specs_multi_agent_v2_uses_task_names_and_hides_resume() { ); let followup_task = find_tool(&tools, "followup_task"); - let ToolSpec::Function(ResponsesApiTool { parameters, .. }) = &followup_task.spec else { + let ToolSpec::Function(ResponsesApiTool { + parameters, + output_schema, + .. + }) = &followup_task.spec + else { panic!("followup_task should be a function tool"); }; + assert_eq!(output_schema, &None); let JsonSchema::Object { properties, required, @@ -453,6 +462,42 @@ fn view_image_tool_includes_detail_with_original_detail_feature() { assert!(description.contains("omit this field for default resized behavior")); } +#[test] +fn disabled_environment_omits_environment_backed_tools() { + let model_info = model_info(); + let mut features = Features::with_defaults(); + features.enable(Feature::UnifiedExec); + features.enable(Feature::JsRepl); + let available_models = Vec::new(); + let mut tools_config = ToolsConfig::new(&ToolsConfigParams { + model_info: &model_info, + available_models: &available_models, + features: &features, + web_search_mode: Some(WebSearchMode::Cached), + session_source: SessionSource::Cli, + sandbox_policy: &SandboxPolicy::DangerFullAccess, + windows_sandbox_level: WindowsSandboxLevel::Disabled, + }) + .with_has_environment(/*has_environment*/ false); + tools_config + .experimental_supported_tools + .push("list_dir".to_string()); + let (tools, _) = build_specs( + &tools_config, + /*mcp_tools*/ None, + /*app_tools*/ None, + &[], + ); + + assert_lacks_tool_name(&tools, "exec_command"); + assert_lacks_tool_name(&tools, 
"write_stdin"); + assert_lacks_tool_name(&tools, "js_repl"); + assert_lacks_tool_name(&tools, "js_repl_reset"); + assert_lacks_tool_name(&tools, "apply_patch"); + assert_lacks_tool_name(&tools, "list_dir"); + assert_lacks_tool_name(&tools, VIEW_IMAGE_TOOL_NAME); +} + #[test] fn test_build_specs_agent_job_worker_tools_enabled() { let model_info = model_info(); @@ -489,9 +534,9 @@ fn test_build_specs_agent_job_worker_tools_enabled() { "close_agent", "spawn_agents_on_csv", "report_agent_job_result", + REQUEST_USER_INPUT_TOOL_NAME, ], ); - assert_lacks_tool_name(&tools, "request_user_input"); } #[test] @@ -1820,6 +1865,7 @@ fn spawn_agent_tool_options(config: &ToolsConfig) -> SpawnAgentToolOptions<'_> { SpawnAgentToolOptions { available_models: &config.available_models, agent_type_description: agent_type_description(config, DEFAULT_AGENT_TYPE_DESCRIPTION), + hide_agent_type_model_reasoning: config.hide_spawn_agent_metadata, } } diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml index 8c68de0da6..713a5b38ae 100644 --- a/codex-rs/tui/Cargo.toml +++ b/codex-rs/tui/Cargo.toml @@ -33,6 +33,7 @@ codex-chatgpt = { workspace = true } codex-cloud-requirements = { workspace = true } codex-config = { workspace = true } codex-core = { workspace = true } +codex-exec-server = { workspace = true } codex-features = { workspace = true } codex-feedback = { workspace = true } codex-file-search = { workspace = true } diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index 4feefcac93..dad10fa766 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -64,6 +64,7 @@ use codex_app_server_protocol::GetAccountRateLimitsResponse; use codex_app_server_protocol::ListMcpServerStatusParams; use codex_app_server_protocol::ListMcpServerStatusResponse; use codex_app_server_protocol::McpServerStatus; +use codex_app_server_protocol::McpServerStatusDetail; use codex_app_server_protocol::PluginInstallParams; use codex_app_server_protocol::PluginInstallResponse; use 
codex_app_server_protocol::PluginListParams; @@ -84,13 +85,14 @@ use codex_app_server_protocol::TurnError as AppServerTurnError; use codex_app_server_protocol::TurnStatus; use codex_config::types::ApprovalsReviewer; use codex_config::types::ModelAvailabilityNuxConfig; +use codex_core::append_message_history_entry; use codex_core::config::Config; use codex_core::config::ConfigBuilder; use codex_core::config::ConfigOverrides; use codex_core::config::edit::ConfigEdit; use codex_core::config::edit::ConfigEditsBuilder; use codex_core::config_loader::ConfigLayerStackOrdering; -use codex_core::message_history; +use codex_core::lookup_message_history_entry; #[cfg(target_os = "windows")] use codex_core::windows_sandbox::WindowsSandboxLevelExt; use codex_features::Feature; @@ -314,12 +316,13 @@ fn session_summary( thread_id: Option, thread_name: Option, ) -> Option { - if token_usage.is_zero() { + let usage_line = (!token_usage.is_zero()).then(|| FinalOutput::from(token_usage).to_string()); + let resume_command = codex_core::util::resume_command(thread_name.as_deref(), thread_id); + + if usage_line.is_none() && resume_command.is_none() { return None; } - let usage_line = FinalOutput::from(token_usage).to_string(); - let resume_command = codex_core::util::resume_command(thread_name.as_deref(), thread_id); Some(SessionSummary { usage_line, resume_command, @@ -484,7 +487,7 @@ fn emit_system_bwrap_warning(app_event_tx: &AppEventSender, config: &Config) { #[derive(Debug, Clone, PartialEq, Eq)] struct SessionSummary { - usage_line: String, + usage_line: Option, resume_command: Option, } @@ -1021,7 +1024,7 @@ fn normalize_harness_overrides_for_cwd( let mut normalized = Vec::with_capacity(overrides.additional_writable_roots.len()); for root in overrides.additional_writable_roots.drain(..) 
{ - let absolute = AbsolutePathBuf::resolve_path_against_base(root, base_cwd)?; + let absolute = AbsolutePathBuf::resolve_path_against_base(root, base_cwd); normalized.push(absolute.into_path_buf()); } overrides.additional_writable_roots = normalized; @@ -1871,8 +1874,8 @@ impl App { Ok(()) } - /// Spawn a background task that fetches the full MCP server inventory from the - /// app-server via paginated RPCs, then delivers the result back through + /// Spawn a background task that fetches MCP server status from the app-server + /// via paginated RPCs, then delivers the result back through /// `AppEvent::McpInventoryLoaded`. /// /// The spawned task is fire-and-forget: no `JoinHandle` is stored, so a stale @@ -2125,7 +2128,9 @@ impl App { self.chat_widget .add_to_history(history_cell::new_mcp_tools_output_from_statuses( - &config, &statuses, + &config, + &statuses, + McpServerStatusDetail::ToolsAndAuthOnly, )); } @@ -2156,8 +2161,7 @@ impl App { let text = text.clone(); let config = self.chat_widget.config_ref().clone(); tokio::spawn(async move { - if let Err(err) = - message_history::append_entry(&text, &thread_id, &config).await + if let Err(err) = append_message_history_entry(&text, &thread_id, &config).await { tracing::warn!( thread_id = %thread_id, @@ -2175,7 +2179,7 @@ impl App { let app_event_tx = self.app_event_tx.clone(); tokio::spawn(async move { let entry_opt = tokio::task::spawn_blocking(move || { - message_history::lookup(log_id, offset, &config) + lookup_message_history_entry(log_id, offset, &config) }) .await .unwrap_or_else(|err| { @@ -3298,7 +3302,10 @@ impl App { "Failed to attach to fresh app-server thread: {err}" )); } else if let Some(summary) = summary { - let mut lines: Vec> = vec![summary.usage_line.clone().into()]; + let mut lines: Vec> = Vec::new(); + if let Some(usage_line) = summary.usage_line { + lines.push(usage_line.into()); + } if let Some(command) = summary.resume_command { let spans = vec!["To continue this session, run ".into(), 
command.cyan()]; lines.push(spans.into()); @@ -4111,8 +4118,10 @@ impl App { { Ok(()) => { if let Some(summary) = summary { - let mut lines: Vec> = - vec![summary.usage_line.clone().into()]; + let mut lines: Vec> = Vec::new(); + if let Some(usage_line) = summary.usage_line { + lines.push(usage_line.into()); + } if let Some(command) = summary.resume_command { let spans = vec![ "To continue this session, run ".into(), @@ -4171,8 +4180,10 @@ impl App { { Ok(()) => { if let Some(summary) = summary { - let mut lines: Vec> = - vec![summary.usage_line.clone().into()]; + let mut lines: Vec> = Vec::new(); + if let Some(usage_line) = summary.usage_line { + lines.push(usage_line.into()); + } if let Some(command) = summary.resume_command { let spans = vec![ "To continue this session, run ".into(), @@ -4690,7 +4701,7 @@ impl App { tokio::task::spawn_blocking(move || { let requested_path = PathBuf::from(path); - let event = match codex_core::windows_sandbox_read_grants::grant_read_root_non_elevated( + let event = match codex_core::grant_read_root_non_elevated( &policy, policy_cwd.as_path(), command_cwd.as_path(), @@ -6002,8 +6013,9 @@ impl App { } } -/// Collect every MCP server status from the app-server by walking the paginated -/// `mcpServerStatus/list` RPC until no `next_cursor` is returned. +/// Collect every MCP server status needed for `/mcp` from the app-server by +/// walking the paginated `mcpServerStatus/list` RPC until no `next_cursor` is +/// returned. /// /// All pages are eagerly gathered into a single `Vec` so the caller can render /// the inventory atomically. Each page requests up to 100 entries. 
@@ -6021,6 +6033,7 @@ async fn fetch_all_mcp_server_statuses( params: ListMcpServerStatusParams { cursor: cursor.clone(), limit: Some(100), + detail: Some(McpServerStatusDetail::ToolsAndAuthOnly), }, }) .await @@ -9186,13 +9199,19 @@ guardian_approval = true items, status, error: None, + started_at: None, + completed_at: None, + duration_ms: None, } } fn turn_started_notification(thread_id: ThreadId, turn_id: &str) -> ServerNotification { ServerNotification::TurnStarted(TurnStartedNotification { thread_id: thread_id.to_string(), - turn: test_turn(turn_id, TurnStatus::InProgress, Vec::new()), + turn: Turn { + started_at: Some(0), + ..test_turn(turn_id, TurnStatus::InProgress, Vec::new()) + }, }) } @@ -9203,7 +9222,11 @@ guardian_approval = true ) -> ServerNotification { ServerNotification::TurnCompleted(TurnCompletedNotification { thread_id: thread_id.to_string(), - turn: test_turn(turn_id, status, Vec::new()), + turn: Turn { + completed_at: Some(0), + duration_ms: Some(1), + ..test_turn(turn_id, status, Vec::new()) + }, }) } @@ -10424,6 +10447,9 @@ guardian_approval = true }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, Turn { id: "turn-2".to_string(), @@ -10444,6 +10470,9 @@ guardian_approval = true ], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, ], events: Vec::new(), @@ -10876,7 +10905,7 @@ guardian_approval = true } #[tokio::test] - async fn session_summary_skip_zero_usage() { + async fn session_summary_skips_when_no_usage_or_resume_hint() { assert!( session_summary( TokenUsage::default(), @@ -10901,7 +10930,7 @@ guardian_approval = true session_summary(usage, Some(conversation), /*thread_name*/ None).expect("summary"); assert_eq!( summary.usage_line, - "Token usage: total=12 input=10 output=2" + Some("Token usage: total=12 input=10 output=2".to_string()) ); assert_eq!( summary.resume_command, diff --git 
a/codex-rs/tui/src/app/app_server_adapter.rs b/codex-rs/tui/src/app/app_server_adapter.rs index ef5a061e32..995af44b28 100644 --- a/codex-rs/tui/src/app/app_server_adapter.rs +++ b/codex-rs/tui/src/app/app_server_adapter.rs @@ -501,6 +501,7 @@ fn server_notification_thread_events( id: String::new(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: notification.turn.id, + started_at: notification.turn.started_at, model_context_window: None, collaboration_mode_kind: ModeKind::default(), }), @@ -676,6 +677,7 @@ fn turn_snapshot_events( id: String::new(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: turn.id.clone(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::default(), }), @@ -741,6 +743,8 @@ fn append_terminal_turn_events(events: &mut Vec, turn: &Turn, include_fai msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: turn.id.clone(), last_agent_message: None, + completed_at: turn.completed_at, + duration_ms: turn.duration_ms, }), }), TurnStatus::Interrupted => events.push(Event { @@ -748,6 +752,8 @@ fn append_terminal_turn_events(events: &mut Vec, turn: &Turn, include_fai msg: EventMsg::TurnAborted(TurnAbortedEvent { turn_id: Some(turn.id.clone()), reason: TurnAbortReason::Interrupted, + completed_at: turn.completed_at, + duration_ms: turn.duration_ms, }), }), TurnStatus::Failed => { @@ -768,6 +774,8 @@ fn append_terminal_turn_events(events: &mut Vec, turn: &Turn, include_fai msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: turn.id.clone(), last_agent_message: None, + completed_at: turn.completed_at, + duration_ms: turn.duration_ms, }), }); } @@ -1103,6 +1111,9 @@ mod tests { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), ) @@ -1121,6 +1132,8 @@ mod tests { }; assert_eq!(completed.turn_id, turn_id); assert_eq!(completed.last_agent_message, None); + assert_eq!(completed.completed_at, Some(0)); + 
assert_eq!(completed.duration_ms, None); } #[test] @@ -1284,6 +1297,9 @@ mod tests { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }], }; @@ -1315,6 +1331,9 @@ mod tests { items: Vec::new(), status: TurnStatus::Interrupted, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), ) @@ -1351,6 +1370,9 @@ mod tests { codex_error_info: Some(CodexErrorInfo::Other), additional_details: None, }), + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), ) @@ -1453,12 +1475,18 @@ mod tests { ], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, Turn { id: "turn-interrupted".to_string(), items: Vec::new(), status: TurnStatus::Interrupted, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, Turn { id: "turn-failed".to_string(), @@ -1469,6 +1497,9 @@ mod tests { codex_error_info: Some(CodexErrorInfo::Other), additional_details: None, }), + started_at: None, + completed_at: None, + duration_ms: None, }, ], }, @@ -1481,7 +1512,10 @@ mod tests { assert!(matches!(events[2].msg, EventMsg::ItemCompleted(_))); assert!(matches!(events[3].msg, EventMsg::TurnComplete(_))); assert!(matches!(events[4].msg, EventMsg::TurnStarted(_))); - let EventMsg::TurnAborted(TurnAbortedEvent { turn_id, reason }) = &events[5].msg else { + let EventMsg::TurnAborted(TurnAbortedEvent { + turn_id, reason, .. 
+ }) = &events[5].msg + else { panic!("expected interrupted turn replay"); }; assert_eq!(turn_id.as_deref(), Some("turn-interrupted")); @@ -1528,6 +1562,9 @@ mod tests { ], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, /*show_raw_agent_reasoning*/ false, ); @@ -1571,6 +1608,9 @@ mod tests { }], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }, /*show_raw_agent_reasoning*/ true, ); diff --git a/codex-rs/tui/src/app/pending_interactive_replay.rs b/codex-rs/tui/src/app/pending_interactive_replay.rs index 63c8fe1249..6bae217362 100644 --- a/codex-rs/tui/src/app/pending_interactive_replay.rs +++ b/codex-rs/tui/src/app/pending_interactive_replay.rs @@ -676,6 +676,9 @@ mod tests { items: Vec::new(), status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: Some(1), }, }) } diff --git a/codex-rs/tui/src/app_server_session.rs b/codex-rs/tui/src/app_server_session.rs index 08e8d237b5..03072632bc 100644 --- a/codex-rs/tui/src/app_server_session.rs +++ b/codex-rs/tui/src/app_server_session.rs @@ -63,8 +63,10 @@ use codex_app_server_protocol::TurnStartParams; use codex_app_server_protocol::TurnStartResponse; use codex_app_server_protocol::TurnSteerParams; use codex_app_server_protocol::TurnSteerResponse; +#[cfg(test)] +use codex_core::append_message_history_entry; use codex_core::config::Config; -use codex_core::message_history; +use codex_core::message_history_metadata; use codex_otel::TelemetryAuthMode; use codex_protocol::ThreadId; use codex_protocol::openai_models::ModelAvailabilityNux; @@ -1076,7 +1078,7 @@ async fn thread_session_state_from_thread_response( .map(ThreadId::from_string) .transpose() .map_err(|err| format!("forked_from_id is invalid: {err}"))?; - let (history_log_id, history_entry_count) = message_history::history_metadata(config).await; + let (history_log_id, history_entry_count) = 
message_history_metadata(config).await; let history_entry_count = u64::try_from(history_entry_count).unwrap_or(u64::MAX); Ok(ThreadSessionState { @@ -1287,6 +1289,9 @@ mod tests { ], status: TurnStatus::Completed, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }], }, model: "gpt-5.4".to_string(), @@ -1313,10 +1318,10 @@ mod tests { let config = build_config(&temp_dir).await; let thread_id = ThreadId::new(); - message_history::append_entry("older", &thread_id, &config) + append_message_history_entry("older", &thread_id, &config) .await .expect("history append should succeed"); - message_history::append_entry("newer", &thread_id, &config) + append_message_history_entry("newer", &thread_id, &config) .await .expect("history append should succeed"); diff --git a/codex-rs/tui/src/bottom_pane/feedback_view.rs b/codex-rs/tui/src/bottom_pane/feedback_view.rs index f4f641c5d5..b1889abca0 100644 --- a/codex-rs/tui/src/bottom_pane/feedback_view.rs +++ b/codex-rs/tui/src/bottom_pane/feedback_view.rs @@ -1,5 +1,5 @@ -use codex_feedback::feedback_diagnostics::FEEDBACK_DIAGNOSTICS_ATTACHMENT_FILENAME; -use codex_feedback::feedback_diagnostics::FeedbackDiagnostics; +use codex_feedback::FEEDBACK_DIAGNOSTICS_ATTACHMENT_FILENAME; +use codex_feedback::FeedbackDiagnostics; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; use crossterm::event::KeyModifiers; @@ -556,7 +556,7 @@ mod tests { use super::*; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; - use codex_feedback::feedback_diagnostics::FeedbackDiagnostic; + use codex_feedback::FeedbackDiagnostic; use pretty_assertions::assert_eq; fn render(view: &FeedbackNoteView, width: u16) -> String { diff --git a/codex-rs/tui/src/bottom_pane/mod.rs b/codex-rs/tui/src/bottom_pane/mod.rs index da5e2421f7..cbb698e3a2 100644 --- a/codex-rs/tui/src/bottom_pane/mod.rs +++ b/codex-rs/tui/src/bottom_pane/mod.rs @@ -76,7 +76,7 @@ pub(crate) struct MentionBinding { mod 
chat_composer; mod chat_composer_history; mod command_popup; -pub mod custom_prompt_view; +pub(crate) mod custom_prompt_view; mod experimental_features_view; mod file_search_popup; mod footer; @@ -108,7 +108,7 @@ pub(crate) use title_setup::TerminalTitleSetupView; mod paste_burst; mod pending_input_preview; mod pending_thread_approvals; -pub mod popup_consts; +pub(crate) mod popup_consts; mod scroll_state; mod selection_popup_common; mod textarea; diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index 34c0df7b4b..86e11b49c4 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -95,13 +95,13 @@ use codex_chatgpt::connectors; use codex_config::types::ApprovalsReviewer; use codex_config::types::Notifications; use codex_config::types::WindowsSandboxModeToml; +use codex_core::DEFAULT_PROJECT_DOC_FILENAME; use codex_core::config::Config; use codex_core::config::Constrained; use codex_core::config::ConstraintResult; use codex_core::config_loader::ConfigLayerStackOrdering; use codex_core::find_thread_name_by_id; use codex_core::plugins::PluginsManager; -use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME; use codex_core::skills::model::SkillMetadata; #[cfg(target_os = "windows")] use codex_core::windows_sandbox::WindowsSandboxLevelExt; @@ -5028,15 +5028,7 @@ impl ChatWidget { self.app_event_tx.send(AppEvent::ForkCurrentSession); } SlashCommand::Init => { - let init_target = match self.config.cwd.join(DEFAULT_PROJECT_DOC_FILENAME) { - Ok(path) => path, - Err(err) => { - self.add_error_message(format!( - "Failed to prepare {DEFAULT_PROJECT_DOC_FILENAME}: {err}", - )); - return; - } - }; + let init_target = self.config.cwd.join(DEFAULT_PROJECT_DOC_FILENAME); if init_target.exists() { let message = format!( "{DEFAULT_PROJECT_DOC_FILENAME} already exists here. Skipping /init to avoid overwriting it." 
@@ -5889,6 +5881,9 @@ impl ChatWidget { items, status, error, + started_at, + completed_at, + duration_ms, } = turn; if matches!(status, TurnStatus::InProgress) { self.last_non_retry_error = None; @@ -5909,6 +5904,9 @@ impl ChatWidget { items: Vec::new(), status, error, + started_at, + completed_at, + duration_ms, }, }, Some(replay_kind), @@ -7297,6 +7295,8 @@ impl ChatWidget { .values() .cloned() .collect(); + let config = self.config.clone(); + let frame_requester = self.frame_requester.clone(); let (cell, handle) = crate::status::new_status_output_with_rate_limits_handle( &self.config, self.status_account_display.as_ref(), @@ -7311,8 +7311,21 @@ impl ChatWidget { self.model_display_name(), collaboration_mode, reasoning_effort_override, + "".to_string(), refreshing_rate_limits, ); + let agents_summary_handle = handle.clone(); + tokio::spawn(async move { + let agents_summary = match crate::status::discover_agents_summary(&config).await { + Ok(summary) => summary, + Err(err) => { + tracing::warn!(error = %err, "failed to discover project docs for /status"); + "".to_string() + } + }; + agents_summary_handle.finish_agents_summary_discovery(agents_summary); + frame_requester.schedule_frame(); + }); if let Some(request_id) = request_id { self.refreshing_status_outputs.push((request_id, handle)); } diff --git a/codex-rs/tui/src/chatwidget/skills.rs b/codex-rs/tui/src/chatwidget/skills.rs index 53dd7e8b8e..b5794f6c4b 100644 --- a/codex-rs/tui/src/chatwidget/skills.rs +++ b/codex-rs/tui/src/chatwidget/skills.rs @@ -13,8 +13,8 @@ use crate::bottom_pane::popup_consts::standard_popup_hint_line; use crate::skills_helpers::skill_description; use crate::skills_helpers::skill_display_name; use codex_chatgpt::connectors::AppInfo; +use codex_core::TOOL_MENTION_SIGIL; use codex_core::connectors::connector_mention_slug; -use codex_core::mention_syntax::TOOL_MENTION_SIGIL; use codex_core::skills::model::SkillDependencies; use codex_core::skills::model::SkillInterface; use 
codex_core::skills::model::SkillMetadata; diff --git a/codex-rs/tui/src/chatwidget/tests/app_server.rs b/codex-rs/tui/src/chatwidget/tests/app_server.rs index 2cabed2cc8..b5e9995d69 100644 --- a/codex-rs/tui/src/chatwidget/tests/app_server.rs +++ b/codex-rs/tui/src/chatwidget/tests/app_server.rs @@ -93,6 +93,9 @@ async fn live_app_server_turn_completed_clears_working_status_after_answer_item( items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, @@ -132,6 +135,9 @@ async fn live_app_server_turn_completed_clears_working_status_after_answer_item( items: Vec::new(), status: AppServerTurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), /*replay_kind*/ None, @@ -415,6 +421,9 @@ async fn live_app_server_failed_turn_does_not_duplicate_error_history() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, @@ -450,6 +459,9 @@ async fn live_app_server_failed_turn_does_not_duplicate_error_history() { codex_error_info: None, additional_details: None, }), + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), /*replay_kind*/ None, @@ -471,6 +483,9 @@ async fn live_app_server_stream_recovery_restores_previous_status_header() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, @@ -525,6 +540,9 @@ async fn live_app_server_server_overloaded_error_renders_warning() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, diff --git a/codex-rs/tui/src/chatwidget/tests/composer_submission.rs b/codex-rs/tui/src/chatwidget/tests/composer_submission.rs index 
a5678a6b93..1bae5d2aa8 100644 --- a/codex-rs/tui/src/chatwidget/tests/composer_submission.rs +++ b/codex-rs/tui/src/chatwidget/tests/composer_submission.rs @@ -618,6 +618,8 @@ async fn interrupted_turn_restore_keeps_active_mode_for_resubmission() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1040,6 +1042,8 @@ async fn interrupt_restores_queued_messages_into_composer() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1079,6 +1083,8 @@ async fn interrupt_prepends_queued_messages_before_existing_composer_text() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); diff --git a/codex-rs/tui/src/chatwidget/tests/exec_flow.rs b/codex-rs/tui/src/chatwidget/tests/exec_flow.rs index 3719ecf3cf..d271fc0076 100644 --- a/codex-rs/tui/src/chatwidget/tests/exec_flow.rs +++ b/codex-rs/tui/src/chatwidget/tests/exec_flow.rs @@ -635,6 +635,7 @@ async fn unified_exec_wait_after_final_agent_message_snapshot() { id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -649,6 +650,8 @@ async fn unified_exec_wait_after_final_agent_message_snapshot() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Final response.".into()), + completed_at: None, + duration_ms: None, }), }); @@ -667,6 +670,7 @@ async fn unified_exec_wait_before_streamed_agent_message_snapshot() { id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: 
"turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -691,6 +695,8 @@ async fn unified_exec_wait_before_streamed_agent_message_snapshot() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -756,6 +762,8 @@ async fn unified_exec_waiting_multiple_empty_snapshots() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -834,6 +842,8 @@ async fn unified_exec_non_empty_then_empty_snapshots() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -856,11 +866,7 @@ async fn unified_exec_non_empty_then_empty_snapshots() { #[tokio::test] async fn view_image_tool_call_adds_history_cell() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(/*model_override*/ None).await; - let image_path = chat - .config - .cwd - .join("example.png") - .expect("absolute image path"); + let image_path = chat.config.cwd.join("example.png"); chat.handle_codex_event(Event { id: "sub-image".into(), @@ -1259,6 +1265,8 @@ async fn interrupt_preserves_unified_exec_processes() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1291,6 +1299,7 @@ async fn interrupt_preserves_unified_exec_wait_streak_snapshot() { id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1304,6 +1313,8 @@ async fn interrupt_preserves_unified_exec_wait_streak_snapshot() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { 
turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1331,6 +1342,8 @@ async fn turn_complete_keeps_unified_exec_processes() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); diff --git a/codex-rs/tui/src/chatwidget/tests/history_replay.rs b/codex-rs/tui/src/chatwidget/tests/history_replay.rs index 58993d67e8..4f43b498d7 100644 --- a/codex-rs/tui/src/chatwidget/tests/history_replay.rs +++ b/codex-rs/tui/src/chatwidget/tests/history_replay.rs @@ -536,6 +536,9 @@ async fn replayed_retryable_app_server_error_keeps_turn_running() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), Some(ReplayKind::ThreadSnapshot), @@ -686,6 +689,9 @@ async fn live_reasoning_summary_is_not_rendered_twice_when_item_completes() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, @@ -731,6 +737,7 @@ async fn replayed_turn_started_does_not_mark_task_running() { chat.replay_initial_messages(vec![EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, })]); @@ -747,6 +754,7 @@ async fn thread_snapshot_replayed_turn_started_marks_task_running() { id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -771,6 +779,9 @@ async fn replayed_in_progress_turn_marks_task_running() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: None, + completed_at: None, + duration_ms: None, }], ReplayKind::ResumeInitialMessages, ); @@ 
-813,6 +824,7 @@ async fn thread_snapshot_replayed_stream_recovery_restores_previous_status_heade id: "task".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -853,6 +865,7 @@ async fn resume_replay_interrupted_reconnect_does_not_leave_stale_working_state( chat.replay_initial_messages(vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -884,6 +897,7 @@ async fn replayed_interrupted_reconnect_footer_row_snapshot() { chat.replay_initial_messages(vec![ EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -909,6 +923,7 @@ async fn stream_recovery_restores_previous_status_header() { id: "task".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), diff --git a/codex-rs/tui/src/chatwidget/tests/mcp_startup.rs b/codex-rs/tui/src/chatwidget/tests/mcp_startup.rs index 9fed8177f1..8b1180cfa3 100644 --- a/codex-rs/tui/src/chatwidget/tests/mcp_startup.rs +++ b/codex-rs/tui/src/chatwidget/tests/mcp_startup.rs @@ -34,6 +34,7 @@ async fn mcp_startup_complete_does_not_clear_running_task() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), diff --git a/codex-rs/tui/src/chatwidget/tests/plan_mode.rs b/codex-rs/tui/src/chatwidget/tests/plan_mode.rs index 265339f5d1..29b1b8c591 100644 --- a/codex-rs/tui/src/chatwidget/tests/plan_mode.rs +++ b/codex-rs/tui/src/chatwidget/tests/plan_mode.rs @@ -566,6 +566,8 @@ async fn 
plan_implementation_popup_skips_replayed_turn_complete() { chat.replay_initial_messages(vec![EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Plan details".to_string()), + completed_at: None, + duration_ms: None, })]); let popup = render_bottom_popup(&chat, /*width*/ 80); @@ -590,6 +592,8 @@ async fn plan_implementation_popup_shows_once_when_replay_precedes_live_turn_com chat.replay_initial_messages(vec![EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Plan details".to_string()), + completed_at: None, + duration_ms: None, })]); let replay_popup = render_bottom_popup(&chat, /*width*/ 80); assert!( @@ -602,6 +606,8 @@ async fn plan_implementation_popup_shows_once_when_replay_precedes_live_turn_com msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Plan details".to_string()), + completed_at: None, + duration_ms: None, }), }); @@ -623,6 +629,8 @@ async fn plan_implementation_popup_shows_once_when_replay_precedes_live_turn_com msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Plan details".to_string()), + completed_at: None, + duration_ms: None, }), }); let duplicate_popup = render_bottom_popup(&chat, /*width*/ 80); @@ -850,6 +858,9 @@ async fn submit_user_message_queues_while_compaction_turn_is_running() { items: Vec::new(), status: AppServerTurnStatus::InProgress, error: None, + started_at: Some(0), + completed_at: None, + duration_ms: None, }, }), /*replay_kind*/ None, @@ -893,6 +904,9 @@ async fn submit_user_message_queues_while_compaction_turn_is_running() { items: Vec::new(), status: AppServerTurnStatus::Completed, error: None, + started_at: None, + completed_at: Some(0), + duration_ms: None, }, }), /*replay_kind*/ None, diff --git a/codex-rs/tui/src/chatwidget/tests/popups_and_settings.rs b/codex-rs/tui/src/chatwidget/tests/popups_and_settings.rs index 
f899267858..8d14d4dda6 100644 --- a/codex-rs/tui/src/chatwidget/tests/popups_and_settings.rs +++ b/codex-rs/tui/src/chatwidget/tests/popups_and_settings.rs @@ -1761,13 +1761,11 @@ async fn feedback_upload_consent_popup_snapshot() { chat.app_event_tx.clone(), crate::app_event::FeedbackCategory::Bug, chat.current_rollout_path.clone(), - &codex_feedback::feedback_diagnostics::FeedbackDiagnostics::new(vec![ - codex_feedback::feedback_diagnostics::FeedbackDiagnostic { - headline: "Proxy environment variables are set and may affect connectivity." - .to_string(), - details: vec!["HTTPS_PROXY = hello".to_string()], - }, - ]), + &codex_feedback::FeedbackDiagnostics::new(vec![codex_feedback::FeedbackDiagnostic { + headline: "Proxy environment variables are set and may affect connectivity." + .to_string(), + details: vec!["HTTPS_PROXY = hello".to_string()], + }]), )); let popup = render_bottom_popup(&chat, /*width*/ 80); @@ -1782,13 +1780,11 @@ async fn feedback_good_result_consent_popup_includes_connectivity_diagnostics_fi chat.app_event_tx.clone(), crate::app_event::FeedbackCategory::GoodResult, chat.current_rollout_path.clone(), - &codex_feedback::feedback_diagnostics::FeedbackDiagnostics::new(vec![ - codex_feedback::feedback_diagnostics::FeedbackDiagnostic { - headline: "Proxy environment variables are set and may affect connectivity." - .to_string(), - details: vec!["HTTPS_PROXY = hello".to_string()], - }, - ]), + &codex_feedback::FeedbackDiagnostics::new(vec![codex_feedback::FeedbackDiagnostic { + headline: "Proxy environment variables are set and may affect connectivity." 
+ .to_string(), + details: vec!["HTTPS_PROXY = hello".to_string()], + }]), )); let popup = render_bottom_popup(&chat, /*width*/ 80); diff --git a/codex-rs/tui/src/chatwidget/tests/review_mode.rs b/codex-rs/tui/src/chatwidget/tests/review_mode.rs index 2034921a01..c42ec4fbac 100644 --- a/codex-rs/tui/src/chatwidget/tests/review_mode.rs +++ b/codex-rs/tui/src/chatwidget/tests/review_mode.rs @@ -61,6 +61,8 @@ async fn interrupted_turn_restores_queued_messages_with_images_and_elements() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -146,6 +148,7 @@ async fn steer_rejection_queues_review_follow_up_before_existing_queued_messages id: "turn-start".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -229,6 +232,8 @@ async fn steer_rejection_queues_review_follow_up_before_existing_queued_messages msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -248,6 +253,8 @@ async fn steer_rejection_queues_review_follow_up_before_existing_queued_messages msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-2".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -932,6 +939,8 @@ async fn replaced_turn_clears_pending_steers_but_keeps_queued_drafts() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Replaced, + completed_at: None, + duration_ms: None, }), }); @@ -1155,6 +1164,8 @@ async fn interrupt_exec_marks_failed_snapshot() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: 
TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1180,6 +1191,7 @@ async fn interrupted_turn_error_message_snapshot() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1191,6 +1203,8 @@ async fn interrupted_turn_error_message_snapshot() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1217,6 +1231,7 @@ async fn interrupted_turn_pending_steers_message_snapshot() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1227,6 +1242,8 @@ async fn interrupted_turn_pending_steers_message_snapshot() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1323,6 +1340,8 @@ async fn review_ended_keeps_unified_exec_processes() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::ReviewEnded, + completed_at: None, + duration_ms: None, }), }); @@ -1355,6 +1374,7 @@ async fn enter_submits_steer_while_review_is_running() { id: "turn-start".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1403,6 +1423,7 @@ async fn review_queues_user_messages_snapshot() { id: "turn-start".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: 
ModeKind::Default, }), diff --git a/codex-rs/tui/src/chatwidget/tests/slash_commands.rs b/codex-rs/tui/src/chatwidget/tests/slash_commands.rs index c6fff59897..fdf7c5008b 100644 --- a/codex-rs/tui/src/chatwidget/tests/slash_commands.rs +++ b/codex-rs/tui/src/chatwidget/tests/slash_commands.rs @@ -97,6 +97,8 @@ async fn slash_copy_state_tracks_turn_complete_final_reply() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Final reply **markdown**".to_string()), + completed_at: None, + duration_ms: None, }), }); @@ -127,6 +129,8 @@ async fn slash_copy_state_tracks_plan_item_completion() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -160,6 +164,8 @@ async fn slash_copy_state_is_preserved_during_running_task() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Previous completed reply".to_string()), + completed_at: None, + duration_ms: None, }), }); chat.on_task_started(); @@ -179,6 +185,8 @@ async fn slash_copy_state_clears_on_thread_rollback() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: Some("Reply that will be rolled back".to_string()), + completed_at: None, + duration_ms: None, }), }); chat.handle_codex_event(Event { @@ -207,6 +215,8 @@ async fn slash_copy_is_unavailable_when_legacy_agent_message_is_not_repeated_on_ msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); let _ = drain_insert_history(&mut rx); @@ -232,6 +242,7 @@ async fn slash_copy_uses_agent_message_item_when_turn_complete_omits_final_text( id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: 
ModeKind::Default, }), @@ -248,6 +259,8 @@ async fn slash_copy_uses_agent_message_item_when_turn_complete_omits_final_text( msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); let _ = drain_insert_history(&mut rx); @@ -277,6 +290,7 @@ async fn slash_copy_does_not_return_stale_output_after_thread_rollback() { id: "turn-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -293,6 +307,8 @@ async fn slash_copy_does_not_return_stale_output_after_thread_rollback() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); let _ = drain_insert_history(&mut rx); @@ -656,6 +672,7 @@ async fn compact_queues_user_messages_snapshot() { id: "turn-start".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), diff --git a/codex-rs/tui/src/chatwidget/tests/status_and_layout.rs b/codex-rs/tui/src/chatwidget/tests/status_and_layout.rs index d7e1a6ba86..00be80d4e0 100644 --- a/codex-rs/tui/src/chatwidget/tests/status_and_layout.rs +++ b/codex-rs/tui/src/chatwidget/tests/status_and_layout.rs @@ -73,6 +73,7 @@ async fn turn_started_uses_runtime_context_window_before_first_token_count() { id: "turn-start".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: Some(950_000), collaboration_mode_kind: ModeKind::Default, }), @@ -628,6 +629,7 @@ async fn ui_snapshots_small_heights_task_running() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: 
ModeKind::Default, }), @@ -661,6 +663,7 @@ async fn status_widget_and_approval_modal_snapshot() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -723,6 +726,7 @@ async fn status_widget_active_snapshot() { id: "task-1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -877,6 +881,8 @@ async fn status_line_branch_refreshes_after_turn_complete() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -895,6 +901,8 @@ async fn status_line_branch_refreshes_after_interrupt() { msg: EventMsg::TurnAborted(codex_protocol::protocol::TurnAbortedEvent { turn_id: Some("turn-1".to_string()), reason: TurnAbortReason::Interrupted, + completed_at: None, + duration_ms: None, }), }); @@ -1120,6 +1128,7 @@ async fn multiple_agent_messages_in_single_turn_emit_multiple_headers() { id: "s1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1142,6 +1151,8 @@ async fn multiple_agent_messages_in_single_turn_emit_multiple_headers() { msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); @@ -1425,6 +1436,7 @@ async fn chatwidget_exec_and_status_layout_vt100_snapshot() { id: "t1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1477,6 +1489,7 @@ async fn chatwidget_markdown_code_blocks_vt100_snapshot() { id: "t1".into(), msg: 
EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), @@ -1551,6 +1564,8 @@ printf 'fenced within fenced\n' msg: EventMsg::TurnComplete(TurnCompleteEvent { turn_id: "turn-1".to_string(), last_agent_message: None, + completed_at: None, + duration_ms: None, }), }); for lines in drain_insert_history(&mut rx) { @@ -1572,6 +1587,7 @@ async fn chatwidget_tall() { id: "t1".into(), msg: EventMsg::TurnStarted(TurnStartedEvent { turn_id: "turn-1".to_string(), + started_at: None, model_context_window: None, collaboration_mode_kind: ModeKind::Default, }), diff --git a/codex-rs/tui/src/debug_config.rs b/codex-rs/tui/src/debug_config.rs index 86d9949c58..bc72304974 100644 --- a/codex-rs/tui/src/debug_config.rs +++ b/codex-rs/tui/src/debug_config.rs @@ -337,6 +337,7 @@ fn format_network_constraints(network: &NetworkConstraints) -> String { dangerously_allow_all_unix_sockets, domains, managed_allowed_domains_only, + danger_full_access_denylist_only, unix_sockets, allow_local_binding, } = network; @@ -374,6 +375,11 @@ fn format_network_constraints(network: &NetworkConstraints) -> String { "managed_allowed_domains_only={managed_allowed_domains_only}" )); } + if let Some(danger_full_access_denylist_only) = danger_full_access_denylist_only { + parts.push(format!( + "danger_full_access_denylist_only={danger_full_access_denylist_only}" + )); + } if let Some(unix_sockets) = unix_sockets { parts.push(format!( "unix_sockets={}", @@ -557,6 +563,7 @@ mod tests { NetworkDomainPermissionToml::Allow, )]), }), + danger_full_access_denylist_only: Some(true), ..Default::default() }, RequirementSource::CloudRequirements, @@ -621,7 +628,7 @@ mod tests { assert!(rendered.contains("mcp_servers: docs (source: MDM managed_config.toml (legacy))")); assert!(rendered.contains("enforce_residency: us (source: cloud requirements)")); assert!(rendered.contains( - "experimental_network: 
enabled=true, domains={example.com=allow} (source: cloud requirements)" + "experimental_network: enabled=true, domains={example.com=allow}, danger_full_access_denylist_only=true (source: cloud requirements)" )); assert!(!rendered.contains(" - rules:")); } diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index cf65f918e5..67c7e9f98b 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -40,15 +40,16 @@ use crate::wrapping::adaptive_wrap_line; use crate::wrapping::adaptive_wrap_lines; use base64::Engine; use codex_app_server_protocol::McpServerStatus; +use codex_app_server_protocol::McpServerStatusDetail; use codex_config::types::McpServerTransportConfig; +#[cfg(test)] +use codex_core::McpManager; use codex_core::config::Config; #[cfg(test)] -use codex_core::mcp::McpManager; -#[cfg(test)] use codex_core::plugins::PluginsManager; -use codex_core::web_search::web_search_detail; +use codex_core::web_search_detail; #[cfg(test)] -use codex_mcp::mcp::qualified_mcp_tool_name_prefix; +use codex_mcp::qualified_mcp_tool_name_prefix; use codex_otel::RuntimeMetricsSummary; use codex_protocol::account::PlanType; use codex_protocol::config_types::ServiceTier; @@ -69,7 +70,7 @@ use codex_protocol::protocol::SessionConfiguredEvent; use codex_protocol::request_user_input::RequestUserInputAnswer; use codex_protocol::request_user_input::RequestUserInputQuestion; use codex_protocol::user_input::TextElement; -use codex_utils_cli::format_env_display::format_env_display; +use codex_utils_cli::format_env_display; use image::DynamicImage; use image::ImageReader; use ratatui::prelude::*; @@ -1979,10 +1980,12 @@ pub(crate) fn new_mcp_tools_output( /// transport details such as command, URL, cwd, and environment display. /// /// This mirrors the layout of [`new_mcp_tools_output`] but sources data from -/// the paginated RPC response rather than the in-process `McpManager`. 
+/// the paginated RPC response rather than the in-process `McpManager`. The +/// `detail` flag controls whether resources and resource templates are rendered. pub(crate) fn new_mcp_tools_output_from_statuses( config: &Config, statuses: &[McpServerStatus], + detail: McpServerStatusDetail, ) -> PlainHistoryCell { let mut lines: Vec> = vec![ "/mcp".magenta().into(), @@ -2094,48 +2097,50 @@ pub(crate) fn new_mcp_tools_output_from_statuses( lines.push(vec![" • Tools: ".into(), names.join(", ").into()].into()); } - let server_resources = status - .map(|status| status.resources.clone()) - .unwrap_or_default(); - if server_resources.is_empty() { - lines.push(" • Resources: (none)".into()); - } else { - let mut spans: Vec> = vec![" • Resources: ".into()]; + if matches!(detail, McpServerStatusDetail::Full) { + let server_resources = status + .map(|status| status.resources.clone()) + .unwrap_or_default(); + if server_resources.is_empty() { + lines.push(" • Resources: (none)".into()); + } else { + let mut spans: Vec> = vec![" • Resources: ".into()]; - for (idx, resource) in server_resources.iter().enumerate() { - if idx > 0 { - spans.push(", ".into()); + for (idx, resource) in server_resources.iter().enumerate() { + if idx > 0 { + spans.push(", ".into()); + } + + let label = resource.title.as_ref().unwrap_or(&resource.name); + spans.push(label.clone().into()); + spans.push(" ".into()); + spans.push(format!("({})", resource.uri).dim()); } - let label = resource.title.as_ref().unwrap_or(&resource.name); - spans.push(label.clone().into()); - spans.push(" ".into()); - spans.push(format!("({})", resource.uri).dim()); + lines.push(spans.into()); } - lines.push(spans.into()); - } + let server_templates = status + .map(|status| status.resource_templates.clone()) + .unwrap_or_default(); + if server_templates.is_empty() { + lines.push(" • Resource templates: (none)".into()); + } else { + let mut spans: Vec> = vec![" • Resource templates: ".into()]; - let server_templates = status - 
.map(|status| status.resource_templates.clone()) - .unwrap_or_default(); - if server_templates.is_empty() { - lines.push(" • Resource templates: (none)".into()); - } else { - let mut spans: Vec> = vec![" • Resource templates: ".into()]; + for (idx, template) in server_templates.iter().enumerate() { + if idx > 0 { + spans.push(", ".into()); + } - for (idx, template) in server_templates.iter().enumerate() { - if idx > 0 { - spans.push(", ".into()); + let label = template.title.as_ref().unwrap_or(&template.name); + spans.push(label.clone().into()); + spans.push(" ".into()); + spans.push(format!("({})", template.uri_template).dim()); } - let label = template.title.as_ref().unwrap_or(&template.name); - spans.push(label.clone().into()); - spans.push(" ".into()); - spans.push(format!("({})", template.uri_template).dim()); + lines.push(spans.into()); } - - lines.push(spans.into()); } lines.push(Line::from("")); @@ -3340,7 +3345,11 @@ mod tests { auth_status: codex_app_server_protocol::McpAuthStatus::Unsupported, }]; - let cell = new_mcp_tools_output_from_statuses(&config, &statuses); + let cell = new_mcp_tools_output_from_statuses( + &config, + &statuses, + McpServerStatusDetail::ToolsAndAuthOnly, + ); let rendered = render_lines(&cell.display_lines(/*width*/ 120)).join("\n"); insta::assert_snapshot!(rendered); diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 991c91c550..7cd4c64949 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -102,7 +102,8 @@ mod clipboard_paste; mod clipboard_text; mod collaboration_modes; mod color; -pub mod custom_terminal; +pub(crate) mod custom_terminal; +pub use custom_terminal::Terminal; mod cwd_prompt; mod debug_config; mod diff_render; @@ -113,10 +114,12 @@ mod file_search; mod frames; mod get_git_diff; mod history_cell; -pub mod insert_history; +pub(crate) mod insert_history; +pub use insert_history::insert_history_lines; mod key_hint; mod line_truncation; -pub mod live_wrap; +pub(crate) mod live_wrap; 
+pub use live_wrap::RowBuilder; mod local_chatgpt_auth; mod markdown; mod markdown_render; @@ -126,10 +129,10 @@ mod model_catalog; mod model_migration; mod multi_agents; mod notifications; -pub mod onboarding; +pub(crate) mod onboarding; mod oss_selection; mod pager_overlay; -pub mod public_widgets; +pub(crate) mod public_widgets; mod render; mod resume_picker; mod selection_list; @@ -148,7 +151,8 @@ mod theme_picker; mod tooltips; mod tui; mod ui_consts; -pub mod update_action; +pub(crate) mod update_action; +pub use update_action::UpdateAction; mod update_prompt; mod updates; mod version; @@ -212,7 +216,7 @@ mod voice { mod wrapping; #[cfg(test)] -pub mod test_backend; +pub(crate) mod test_backend; #[cfg(test)] pub(crate) mod test_support; @@ -1697,9 +1701,9 @@ mod tests { use codex_app_server_protocol::RequestId; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::ThreadStartResponse; + use codex_config::config_toml::ProjectConfig; use codex_core::config::ConfigBuilder; use codex_core::config::ConfigOverrides; - use codex_core::config::ProjectConfig; use codex_features::Feature; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::RolloutItem; diff --git a/codex-rs/tui/src/local_chatgpt_auth.rs b/codex-rs/tui/src/local_chatgpt_auth.rs index cb8a3a4aae..e888c0387c 100644 --- a/codex-rs/tui/src/local_chatgpt_auth.rs +++ b/codex-rs/tui/src/local_chatgpt_auth.rs @@ -1,7 +1,9 @@ +#![cfg(test)] + use std::path::Path; use codex_app_server_protocol::AuthMode; -use codex_login::AuthCredentialsStoreMode; +use codex_config::types::AuthCredentialsStoreMode; use codex_login::load_auth_dot_json; #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/codex-rs/tui/src/mention_codec.rs b/codex-rs/tui/src/mention_codec.rs index 6e95067ba4..d3d1ead4de 100644 --- a/codex-rs/tui/src/mention_codec.rs +++ b/codex-rs/tui/src/mention_codec.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use std::collections::VecDeque; -use 
codex_core::mention_syntax::PLUGIN_TEXT_MENTION_SIGIL; -use codex_core::mention_syntax::TOOL_MENTION_SIGIL; +use codex_core::PLUGIN_TEXT_MENTION_SIGIL; +use codex_core::TOOL_MENTION_SIGIL; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct LinkedMention { diff --git a/codex-rs/tui/src/onboarding/auth.rs b/codex-rs/tui/src/onboarding/auth.rs index a5915618ca..f991d028cd 100644 --- a/codex-rs/tui/src/onboarding/auth.rs +++ b/codex-rs/tui/src/onboarding/auth.rs @@ -8,8 +8,6 @@ use codex_app_server_protocol::CancelLoginAccountParams; use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::LoginAccountParams; use codex_app_server_protocol::LoginAccountResponse; -use codex_login::AuthCredentialsStoreMode; -use codex_login::DeviceCode; use codex_login::read_openai_api_key_from_env; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -33,6 +31,7 @@ use ratatui::widgets::WidgetRef; use ratatui::widgets::Wrap; use codex_protocol::config_types::ForcedLoginMethod; +use std::cell::Cell; use std::sync::Arc; use std::sync::RwLock; use uuid::Uuid; @@ -77,8 +76,6 @@ pub(crate) fn mark_url_hyperlink(buf: &mut Buffer, area: Rect, url: &str) { } } } -use std::path::PathBuf; -use tokio::sync::Notify; use super::onboarding_screen::StepState; @@ -108,6 +105,20 @@ fn onboarding_request_id() -> codex_app_server_protocol::RequestId { codex_app_server_protocol::RequestId::String(Uuid::new_v4().to_string()) } +pub(super) async fn cancel_login_attempt( + request_handle: &AppServerRequestHandle, + login_id: String, +) { + let _ = request_handle + .request_typed::( + ClientRequest::CancelLoginAccount { + request_id: onboarding_request_id(), + params: CancelLoginAccountParams { login_id }, + }, + ) + .await; +} + #[derive(Clone, Default)] pub(crate) struct ApiKeyInputState { value: String, @@ -123,8 +134,49 @@ pub(crate) struct ContinueInBrowserState { #[derive(Clone)] pub(crate) struct ContinueWithDeviceCodeState { - device_code: Option, - cancel: 
Option>, + request_id: String, + login_id: Option, + verification_url: Option, + user_code: Option, +} + +impl ContinueWithDeviceCodeState { + pub(crate) fn pending(request_id: String) -> Self { + Self { + request_id, + login_id: None, + verification_url: None, + user_code: None, + } + } + + pub(crate) fn ready( + request_id: String, + login_id: String, + verification_url: String, + user_code: String, + ) -> Self { + Self { + request_id, + login_id: Some(login_id), + verification_url: Some(verification_url), + user_code: Some(user_code), + } + } + + pub(crate) fn login_id(&self) -> Option<&str> { + self.login_id.as_deref() + } + + pub(crate) fn is_showing_copyable_auth(&self) -> bool { + self.verification_url + .as_deref() + .is_some_and(|url| !url.is_empty()) + && self + .user_code + .as_deref() + .is_some_and(|user_code| !user_code.is_empty()) + } } impl KeyboardHandler for AuthModeWidget { @@ -181,16 +233,25 @@ pub(crate) struct AuthModeWidget { pub highlighted_mode: SignInOption, pub error: Arc>>, pub sign_in_state: Arc>, - pub codex_home: PathBuf, - pub cli_auth_credentials_store_mode: AuthCredentialsStoreMode, pub login_status: LoginStatus, pub app_server_request_handle: AppServerRequestHandle, - pub forced_chatgpt_workspace_id: Option, pub forced_login_method: Option, pub animations_enabled: bool, + pub animations_suppressed: Cell, } impl AuthModeWidget { + pub(crate) fn set_animations_suppressed(&self, suppressed: bool) { + self.animations_suppressed.set(suppressed); + } + + pub(crate) fn should_suppress_animations(&self) -> bool { + matches!( + &*self.sign_in_state.read().unwrap(), + SignInState::ChatGptContinueInBrowser(_) | SignInState::ChatGptDeviceCode(_) + ) + } + pub(crate) fn cancel_active_attempt(&self) { let mut sign_in_state = self.sign_in_state.write().unwrap(); match &*sign_in_state { @@ -198,19 +259,15 @@ impl AuthModeWidget { let request_handle = self.app_server_request_handle.clone(); let login_id = state.login_id.clone(); tokio::spawn(async 
move { - let _ = request_handle - .request_typed::( - ClientRequest::CancelLoginAccount { - request_id: onboarding_request_id(), - params: CancelLoginAccountParams { login_id }, - }, - ) - .await; + cancel_login_attempt(&request_handle, login_id).await; }); } SignInState::ChatGptDeviceCode(state) => { - if let Some(cancel) = &state.cancel { - cancel.notify_one(); + if let Some(login_id) = state.login_id().map(str::to_owned) { + let request_handle = self.app_server_request_handle.clone(); + tokio::spawn(async move { + cancel_login_attempt(&request_handle, login_id).await; + }); } } _ => return, @@ -415,7 +472,7 @@ impl AuthModeWidget { fn render_continue_in_browser(&self, area: Rect, buf: &mut Buffer) { let mut spans = vec![" ".into()]; - if self.animations_enabled { + if self.animations_enabled && !self.animations_suppressed.get() { // Schedule a follow-up frame to keep the shimmer animation going. self.request_frame .schedule_frame_in(std::time::Duration::from_millis(100)); @@ -814,6 +871,9 @@ impl AuthModeWidget { let is_matching_login = matches!( &*guard, SignInState::ChatGptContinueInBrowser(state) if state.login_id == login_id + ) || matches!( + &*guard, + SignInState::ChatGptDeviceCode(state) if state.login_id() == Some(login_id.as_str()) ); drop(guard); if !is_matching_login { @@ -900,6 +960,7 @@ mod tests { use codex_app_server_client::InProcessClientStartArgs; use codex_arg0::Arg0DispatchPaths; use codex_cloud_requirements::cloud_requirements_loader_for_storage; + use codex_config::types::AuthCredentialsStoreMode; use codex_core::config::ConfigBuilder; use codex_protocol::protocol::SessionSource; @@ -943,13 +1004,11 @@ mod tests { highlighted_mode: SignInOption::ChatGpt, error: Arc::new(RwLock::new(None)), sign_in_state: Arc::new(RwLock::new(SignInState::PickMode)), - codex_home: codex_home_path.clone(), - cli_auth_credentials_store_mode: AuthCredentialsStoreMode::File, login_status: LoginStatus::NotAuthenticated, app_server_request_handle: 
AppServerRequestHandle::InProcess(client.request_handle()), - forced_chatgpt_workspace_id: None, forced_login_method: Some(ForcedLoginMethod::Chatgpt), animations_enabled: true, + animations_suppressed: std::cell::Cell::new(false), }; (widget, codex_home) } @@ -1023,13 +1082,14 @@ mod tests { #[tokio::test] async fn cancel_active_attempt_notifies_device_code_login() { let (widget, _tmp) = widget_forced_chatgpt().await; - let cancel = Arc::new(Notify::new()); *widget.error.write().unwrap() = Some("still logging in".to_string()); *widget.sign_in_state.write().unwrap() = - SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState { - device_code: None, - cancel: Some(cancel.clone()), - }); + SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState::ready( + "request-1".to_string(), + "login-1".to_string(), + "https://chatgpt.com/device".to_string(), + "ABCD-EFGH".to_string(), + )); widget.cancel_active_attempt(); @@ -1038,11 +1098,6 @@ mod tests { &*widget.sign_in_state.read().unwrap(), SignInState::PickMode )); - assert!( - tokio::time::timeout(std::time::Duration::from_millis(50), cancel.notified()) - .await - .is_ok() - ); } /// Collects all buffer cell symbols that contain the OSC 8 open sequence @@ -1085,6 +1140,55 @@ mod tests { assert_eq!(found, url, "OSC 8 hyperlink should cover the full URL"); } + #[test] + fn auth_widget_suppresses_animations_when_device_code_is_visible() { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let (widget, _tmp) = runtime.block_on(widget_forced_chatgpt()); + *widget.sign_in_state.write().unwrap() = + SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState::ready( + "request-1".to_string(), + "login-1".to_string(), + "https://chatgpt.com/device".to_string(), + "ABCD-EFGH".to_string(), + )); + + assert_eq!(widget.should_suppress_animations(), true); + } + + #[test] + fn auth_widget_suppresses_animations_while_requesting_device_code() { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let (widget, _tmp) = 
runtime.block_on(widget_forced_chatgpt()); + *widget.sign_in_state.write().unwrap() = SignInState::ChatGptDeviceCode( + ContinueWithDeviceCodeState::pending("request-1".to_string()), + ); + + assert_eq!(widget.should_suppress_animations(), true); + } + + #[tokio::test] + async fn device_code_login_completion_advances_to_success_message() { + let (mut widget, _tmp) = widget_forced_chatgpt().await; + *widget.sign_in_state.write().unwrap() = + SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState::ready( + "request-1".to_string(), + "login-1".to_string(), + "https://chatgpt.com/device".to_string(), + "ABCD-EFGH".to_string(), + )); + + widget.on_account_login_completed(AccountLoginCompletedNotification { + login_id: Some("login-1".to_string()), + success: true, + error: None, + }); + + assert!(matches!( + &*widget.sign_in_state.read().unwrap(), + SignInState::ChatGptSuccessMessage + )); + } + #[test] fn mark_url_hyperlink_wraps_cyan_underlined_cells() { let url = "https://example.com"; diff --git a/codex-rs/tui/src/onboarding/auth/headless_chatgpt_login.rs b/codex-rs/tui/src/onboarding/auth/headless_chatgpt_login.rs index 97709e9493..a3e82e6cd8 100644 --- a/codex-rs/tui/src/onboarding/auth/headless_chatgpt_login.rs +++ b/codex-rs/tui/src/onboarding/auth/headless_chatgpt_login.rs @@ -1,12 +1,6 @@ -#![allow(dead_code)] - use codex_app_server_protocol::ClientRequest; use codex_app_server_protocol::LoginAccountParams; use codex_app_server_protocol::LoginAccountResponse; -use codex_login::CLIENT_ID; -use codex_login::ServerOptions; -use codex_login::complete_device_code_login; -use codex_login::request_device_code; use ratatui::buffer::Buffer; use ratatui::layout::Rect; use ratatui::prelude::Widget; @@ -14,109 +8,74 @@ use ratatui::style::Stylize; use ratatui::text::Line; use ratatui::widgets::Paragraph; use ratatui::widgets::Wrap; +use uuid::Uuid; -use std::sync::Arc; -use std::sync::RwLock; -use tokio::sync::Notify; - -use crate::local_chatgpt_auth::LocalChatgptAuth; 
-use crate::local_chatgpt_auth::load_local_chatgpt_auth; use crate::shimmer::shimmer_spans; -use crate::tui::FrameRequester; use super::AuthModeWidget; -use super::ContinueInBrowserState; use super::ContinueWithDeviceCodeState; use super::SignInState; +use super::cancel_login_attempt; use super::mark_url_hyperlink; -use super::maybe_open_auth_url_in_browser; use super::onboarding_request_id; pub(super) fn start_headless_chatgpt_login(widget: &mut AuthModeWidget) { - let mut opts = ServerOptions::new( - widget.codex_home.clone(), - CLIENT_ID.to_string(), - widget.forced_chatgpt_workspace_id.clone(), - widget.cli_auth_credentials_store_mode, - ); - opts.open_browser = false; + let request_id = Uuid::new_v4().to_string(); + *widget.sign_in_state.write().unwrap() = + SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState::pending(request_id.clone())); + widget.request_frame.schedule_frame(); + let request_handle = widget.app_server_request_handle.clone(); let sign_in_state = widget.sign_in_state.clone(); let request_frame = widget.request_frame.clone(); let error = widget.error.clone(); - let request_handle = widget.app_server_request_handle.clone(); - let codex_home = widget.codex_home.clone(); - let cli_auth_credentials_store_mode = widget.cli_auth_credentials_store_mode; - let forced_chatgpt_workspace_id = widget.forced_chatgpt_workspace_id.clone(); - let cancel = begin_device_code_attempt(&sign_in_state, &request_frame); - tokio::spawn(async move { - let device_code = match request_device_code(&opts).await { - Ok(device_code) => device_code, - Err(err) => { - if err.kind() == std::io::ErrorKind::NotFound { - fallback_to_browser_login( - request_handle, - sign_in_state, - request_frame, - error, - cancel, - ) - .await; + match request_handle + .request_typed::(ClientRequest::LoginAccount { + request_id: onboarding_request_id(), + params: LoginAccountParams::ChatgptDeviceCode, + }) + .await + { + Ok(LoginAccountResponse::ChatgptDeviceCode { + login_id, + 
verification_url, + user_code, + }) => { + let updated = set_device_code_state_for_active_attempt( + &sign_in_state, + &request_frame, + &request_id, + ContinueWithDeviceCodeState::ready( + request_id.clone(), + login_id.clone(), + verification_url, + user_code, + ), + ); + if updated { + *error.write().unwrap() = None; } else { - set_device_code_error_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - err.to_string(), - ); + cancel_login_attempt(&request_handle, login_id).await; } - return; } - }; - - if !set_device_code_state_for_active_attempt( - &sign_in_state, - &request_frame, - &cancel, - SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState { - device_code: Some(device_code.clone()), - cancel: Some(cancel.clone()), - }), - ) { - return; - } - - tokio::select! { - _ = cancel.notified() => {} - result = complete_device_code_login(opts, device_code) => { - match result { - Ok(()) => { - let local_auth = load_local_chatgpt_auth( - &codex_home, - cli_auth_credentials_store_mode, - forced_chatgpt_workspace_id.as_deref(), - ); - handle_chatgpt_auth_tokens_login_result_for_active_attempt( - request_handle, - sign_in_state, - request_frame, - error, - cancel, - local_auth, - ).await; - } - Err(err) => { - set_device_code_error_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - err.to_string(), - ); - } - } + Ok(other) => { + let _updated = set_device_code_error_for_active_attempt( + &sign_in_state, + &request_frame, + &error, + &request_id, + format!("Unexpected account/login/start response: {other:?}"), + ); + } + Err(err) => { + let _updated = set_device_code_error_for_active_attempt( + &sign_in_state, + &request_frame, + &error, + &request_id, + err.to_string(), + ); } } }); @@ -128,15 +87,14 @@ pub(super) fn render_device_code_login( buf: &mut Buffer, state: &ContinueWithDeviceCodeState, ) { - let banner = if state.device_code.is_some() { + let banner = if state.is_showing_copyable_auth() { "Finish 
signing in via your browser" } else { "Preparing device code login" }; let mut spans = vec![" ".into()]; - if widget.animations_enabled { - // Schedule a follow-up frame to keep the shimmer animation going. + if widget.animations_enabled && !widget.animations_suppressed.get() { widget .request_frame .schedule_frame_in(std::time::Duration::from_millis(100)); @@ -147,13 +105,14 @@ pub(super) fn render_device_code_login( let mut lines = vec![spans.into(), "".into()]; - // Capture the verification URL for OSC 8 hyperlink marking after render. - let verification_url = if let Some(device_code) = &state.device_code { + let verification_url = if let (Some(verification_url), Some(user_code)) = + (&state.verification_url, &state.user_code) + { lines.push(" 1. Open this link in your browser and sign in".into()); lines.push("".into()); lines.push(Line::from(vec![ " ".into(), - device_code.verification_url.as_str().cyan().underlined(), + verification_url.as_str().cyan().underlined(), ])); lines.push("".into()); lines.push( @@ -162,7 +121,7 @@ pub(super) fn render_device_code_login( lines.push("".into()); lines.push(Line::from(vec![ " ".into(), - device_code.user_code.as_str().cyan().bold(), + user_code.as_str().cyan().bold(), ])); lines.push("".into()); lines.push( @@ -171,7 +130,7 @@ pub(super) fn render_device_code_login( .into(), ); lines.push("".into()); - Some(device_code.verification_url.clone()) + Some(verification_url.clone()) } else { lines.push(" Requesting a one-time code...".dim().into()); lines.push("".into()); @@ -183,286 +142,139 @@ pub(super) fn render_device_code_login( .wrap(Wrap { trim: false }) .render(area, buf); - // Wrap cyan+underlined URL cells with OSC 8 so the terminal treats - // the entire region as a single clickable hyperlink. 
if let Some(url) = &verification_url { mark_url_hyperlink(buf, area, url); } } -fn device_code_attempt_matches(state: &SignInState, cancel: &Arc) -> bool { +fn device_code_attempt_matches(state: &SignInState, request_id: &str) -> bool { matches!( state, - SignInState::ChatGptDeviceCode(state) - if state - .cancel - .as_ref() - .is_some_and(|existing| Arc::ptr_eq(existing, cancel)) + SignInState::ChatGptDeviceCode(state) if state.request_id == request_id ) } -fn begin_device_code_attempt( - sign_in_state: &Arc>, - request_frame: &FrameRequester, -) -> Arc { - let cancel = Arc::new(Notify::new()); - *sign_in_state.write().unwrap() = SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState { - device_code: None, - cancel: Some(cancel.clone()), - }); - request_frame.schedule_frame(); - cancel -} - fn set_device_code_state_for_active_attempt( - sign_in_state: &Arc>, - request_frame: &FrameRequester, - cancel: &Arc, - next_state: SignInState, + sign_in_state: &std::sync::Arc>, + request_frame: &crate::tui::FrameRequester, + request_id: &str, + next_state: ContinueWithDeviceCodeState, ) -> bool { let mut guard = sign_in_state.write().unwrap(); - if !device_code_attempt_matches(&guard, cancel) { + if !device_code_attempt_matches(&guard, request_id) { return false; } - *guard = next_state; - drop(guard); - request_frame.schedule_frame(); - true -} - -fn set_device_code_success_message_for_active_attempt( - sign_in_state: &Arc>, - request_frame: &FrameRequester, - cancel: &Arc, -) -> bool { - let mut guard = sign_in_state.write().unwrap(); - if !device_code_attempt_matches(&guard, cancel) { - return false; - } - - *guard = SignInState::ChatGptSuccessMessage; + *guard = SignInState::ChatGptDeviceCode(next_state); drop(guard); request_frame.schedule_frame(); true } fn set_device_code_error_for_active_attempt( - sign_in_state: &Arc>, - request_frame: &FrameRequester, - error: &Arc>>, - cancel: &Arc, + sign_in_state: &std::sync::Arc>, + request_frame: 
&crate::tui::FrameRequester, + error: &std::sync::Arc>>, + request_id: &str, message: String, ) -> bool { - if !set_device_code_state_for_active_attempt( - sign_in_state, - request_frame, - cancel, - SignInState::PickMode, - ) { + let mut guard = sign_in_state.write().unwrap(); + if !device_code_attempt_matches(&guard, request_id) { return false; } + + *guard = SignInState::PickMode; + drop(guard); *error.write().unwrap() = Some(message); request_frame.schedule_frame(); true } -async fn fallback_to_browser_login( - request_handle: codex_app_server_client::AppServerRequestHandle, - sign_in_state: Arc>, - request_frame: FrameRequester, - error: Arc>>, - cancel: Arc, -) { - let should_fallback = { - let guard = sign_in_state.read().unwrap(); - device_code_attempt_matches(&guard, &cancel) - }; - if !should_fallback { - return; - } - - match request_handle - .request_typed::(ClientRequest::LoginAccount { - request_id: onboarding_request_id(), - params: LoginAccountParams::Chatgpt, - }) - .await - { - Ok(LoginAccountResponse::Chatgpt { login_id, auth_url }) => { - maybe_open_auth_url_in_browser(&request_handle, &auth_url); - *error.write().unwrap() = None; - let _updated = set_device_code_state_for_active_attempt( - &sign_in_state, - &request_frame, - &cancel, - SignInState::ChatGptContinueInBrowser(ContinueInBrowserState { - login_id, - auth_url, - }), - ); - } - Ok(other) => { - set_device_code_error_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - format!("Unexpected account/login/start response: {other:?}"), - ); - } - Err(err) => { - set_device_code_error_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - err.to_string(), - ); - } - } -} - -async fn handle_chatgpt_auth_tokens_login_result_for_active_attempt( - request_handle: codex_app_server_client::AppServerRequestHandle, - sign_in_state: Arc>, - request_frame: FrameRequester, - error: Arc>>, - cancel: Arc, - local_auth: Result, -) { - let local_auth = 
match local_auth { - Ok(local_auth) => local_auth, - Err(err) => { - set_device_code_error_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - err, - ); - return; - } - }; - - let result = request_handle - .request_typed::(ClientRequest::LoginAccount { - request_id: onboarding_request_id(), - params: LoginAccountParams::ChatgptAuthTokens { - access_token: local_auth.access_token, - chatgpt_account_id: local_auth.chatgpt_account_id, - chatgpt_plan_type: local_auth.chatgpt_plan_type, - }, - }) - .await; - apply_chatgpt_auth_tokens_login_response_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - result.map_err(|err| err.to_string()), - ); -} - -fn apply_chatgpt_auth_tokens_login_response_for_active_attempt( - sign_in_state: &Arc>, - request_frame: &FrameRequester, - error: &Arc>>, - cancel: &Arc, - result: Result, -) { - match result { - Ok(LoginAccountResponse::ChatgptAuthTokens {}) => { - *error.write().unwrap() = None; - let _updated = set_device_code_success_message_for_active_attempt( - sign_in_state, - request_frame, - cancel, - ); - } - Ok(other) => { - set_device_code_error_for_active_attempt( - sign_in_state, - request_frame, - error, - cancel, - format!("Unexpected account/login/start response: {other:?}"), - ); - } - Err(err) => { - set_device_code_error_for_active_attempt( - sign_in_state, - request_frame, - error, - cancel, - err, - ); - } - } -} - #[cfg(test)] mod tests { use super::*; - use pretty_assertions::assert_eq; + use std::sync::Arc; + use std::sync::RwLock; - fn device_code_sign_in_state(cancel: Arc) -> Arc> { + fn pending_device_code_state(request_id: &str) -> Arc> { Arc::new(RwLock::new(SignInState::ChatGptDeviceCode( - ContinueWithDeviceCodeState { - device_code: None, - cancel: Some(cancel), - }, + ContinueWithDeviceCodeState::pending(request_id.to_string()), ))) } #[test] - fn device_code_attempt_matches_only_for_matching_cancel() { - let cancel = Arc::new(Notify::new()); - let state 
= SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState { - device_code: None, - cancel: Some(cancel.clone()), - }); - - assert_eq!(device_code_attempt_matches(&state, &cancel), true); - assert_eq!( - device_code_attempt_matches(&state, &Arc::new(Notify::new())), - false - ); - assert_eq!( - device_code_attempt_matches(&SignInState::PickMode, &cancel), - false - ); - } - - #[test] - fn begin_device_code_attempt_sets_state() { - let sign_in_state = Arc::new(RwLock::new(SignInState::PickMode)); - let request_frame = FrameRequester::test_dummy(); - - let cancel = begin_device_code_attempt(&sign_in_state, &request_frame); - let guard = sign_in_state.read().unwrap(); - - let state: &SignInState = &guard; - assert_eq!(device_code_attempt_matches(state, &cancel), true); - assert!(matches!( - state, - SignInState::ChatGptDeviceCode(state) if state.device_code.is_none() + fn device_code_attempt_matches_only_for_matching_request_id() { + let state = SignInState::ChatGptDeviceCode(ContinueWithDeviceCodeState::pending( + "request-1".to_string(), )); + + assert_eq!(device_code_attempt_matches(&state, "request-1"), true); + assert_eq!(device_code_attempt_matches(&state, "request-2"), false); + assert_eq!( + device_code_attempt_matches(&SignInState::PickMode, "request-1"), + false + ); } #[test] fn set_device_code_state_for_active_attempt_updates_only_when_active() { - let request_frame = FrameRequester::test_dummy(); - let cancel = Arc::new(Notify::new()); - let sign_in_state = device_code_sign_in_state(cancel.clone()); + let request_frame = crate::tui::FrameRequester::test_dummy(); + let sign_in_state = pending_device_code_state("request-1"); assert_eq!( set_device_code_state_for_active_attempt( &sign_in_state, &request_frame, - &cancel, - SignInState::PickMode, + "request-1", + ContinueWithDeviceCodeState::ready( + "request-1".to_string(), + "login-1".to_string(), + "https://example.com/device".to_string(), + "ABCD-EFGH".to_string(), + ), + ), + true + ); + 
assert!(matches!( + &*sign_in_state.read().unwrap(), + SignInState::ChatGptDeviceCode(state) if state.login_id() == Some("login-1") + )); + + let sign_in_state = pending_device_code_state("request-2"); + assert_eq!( + set_device_code_state_for_active_attempt( + &sign_in_state, + &request_frame, + "request-1", + ContinueWithDeviceCodeState::ready( + "request-1".to_string(), + "login-1".to_string(), + "https://example.com/device".to_string(), + "ABCD-EFGH".to_string(), + ), + ), + false + ); + assert!(matches!( + &*sign_in_state.read().unwrap(), + SignInState::ChatGptDeviceCode(state) if state.login_id.is_none() + )); + } + + #[test] + fn set_device_code_error_for_active_attempt_updates_only_when_active() { + let request_frame = crate::tui::FrameRequester::test_dummy(); + let error = Arc::new(RwLock::new(None)); + let sign_in_state = pending_device_code_state("request-1"); + + assert_eq!( + set_device_code_error_for_active_attempt( + &sign_in_state, + &request_frame, + &error, + "request-1", + "device code unavailable".to_string(), ), true ); @@ -470,79 +282,27 @@ mod tests { &*sign_in_state.read().unwrap(), SignInState::PickMode )); - - let sign_in_state = device_code_sign_in_state(Arc::new(Notify::new())); assert_eq!( - set_device_code_state_for_active_attempt( - &sign_in_state, - &request_frame, - &cancel, - SignInState::PickMode, - ), - false + error.read().unwrap().as_deref(), + Some("device code unavailable") ); - assert!(matches!( - &*sign_in_state.read().unwrap(), - SignInState::ChatGptDeviceCode(_) - )); - } - #[test] - fn set_device_code_success_message_for_active_attempt_updates_only_when_active() { - let request_frame = FrameRequester::test_dummy(); - let cancel = Arc::new(Notify::new()); - let sign_in_state = device_code_sign_in_state(cancel.clone()); - assert_eq!( - set_device_code_success_message_for_active_attempt( - &sign_in_state, - &request_frame, - &cancel, - ), - true - ); - assert!(matches!( - &*sign_in_state.read().unwrap(), - 
SignInState::ChatGptSuccessMessage - )); - - let sign_in_state = device_code_sign_in_state(Arc::new(Notify::new())); - assert_eq!( - set_device_code_success_message_for_active_attempt( - &sign_in_state, - &request_frame, - &cancel, - ), - false - ); - assert!(matches!( - &*sign_in_state.read().unwrap(), - SignInState::ChatGptDeviceCode(_) - )); - } - - #[test] - fn chatgpt_auth_tokens_success_sets_success_message_without_login_id() { - let sign_in_state = device_code_sign_in_state(Arc::new(Notify::new())); - let request_frame = FrameRequester::test_dummy(); let error = Arc::new(RwLock::new(None)); - let cancel = match &*sign_in_state.read().unwrap() { - SignInState::ChatGptDeviceCode(state) => { - state.cancel.as_ref().expect("cancel handle").clone() - } - _ => panic!("expected device-code state"), - }; - - apply_chatgpt_auth_tokens_login_response_for_active_attempt( - &sign_in_state, - &request_frame, - &error, - &cancel, - Ok(LoginAccountResponse::ChatgptAuthTokens {}), + let sign_in_state = pending_device_code_state("request-2"); + assert_eq!( + set_device_code_error_for_active_attempt( + &sign_in_state, + &request_frame, + &error, + "request-1", + "device code unavailable".to_string(), + ), + false ); - assert!(matches!( &*sign_in_state.read().unwrap(), - SignInState::ChatGptSuccessMessage + SignInState::ChatGptDeviceCode(_) )); + assert_eq!(*error.read().unwrap(), None); } } diff --git a/codex-rs/tui/src/onboarding/mod.rs b/codex-rs/tui/src/onboarding/mod.rs index 8a4dc760fa..0db5204d31 100644 --- a/codex-rs/tui/src/onboarding/mod.rs +++ b/codex-rs/tui/src/onboarding/mod.rs @@ -1,6 +1,5 @@ mod auth; -pub mod onboarding_screen; +pub(crate) mod onboarding_screen; mod trust_directory; pub(crate) use auth::mark_url_hyperlink; -pub use trust_directory::TrustDirectorySelection; mod welcome; diff --git a/codex-rs/tui/src/onboarding/onboarding_screen.rs b/codex-rs/tui/src/onboarding/onboarding_screen.rs index ec8e428947..be2e41bc8f 100644 --- 
a/codex-rs/tui/src/onboarding/onboarding_screen.rs +++ b/codex-rs/tui/src/onboarding/onboarding_screen.rs @@ -86,10 +86,8 @@ impl OnboardingScreen { config, } = args; let cwd = config.cwd.to_path_buf(); - let forced_chatgpt_workspace_id = config.forced_chatgpt_workspace_id.clone(); - let forced_login_method = config.forced_login_method; let codex_home = config.codex_home.clone(); - let cli_auth_credentials_store_mode = config.cli_auth_credentials_store_mode; + let forced_login_method = config.forced_login_method; let mut steps: Vec = Vec::new(); steps.push(Step::Welcome(WelcomeWidget::new( !matches!(login_status, LoginStatus::NotAuthenticated), @@ -107,13 +105,11 @@ impl OnboardingScreen { highlighted_mode, error: Arc::new(RwLock::new(None)), sign_in_state: Arc::new(RwLock::new(SignInState::PickMode)), - codex_home: codex_home.clone(), - cli_auth_credentials_store_mode, login_status, app_server_request_handle, - forced_chatgpt_workspace_id, forced_login_method, animations_enabled: config.animations, + animations_suppressed: std::cell::Cell::new(false), })); } else { tracing::warn!("skipping onboarding login step without app-server request handle"); @@ -175,6 +171,15 @@ impl OnboardingScreen { out } + fn should_suppress_animations(&self) -> bool { + // Freeze the whole onboarding screen when auth is showing copyable login + // material so terminal selection is not interrupted by redraws. 
+ self.current_steps().into_iter().any(|step| match step { + Step::Auth(widget) => widget.should_suppress_animations(), + Step::Welcome(_) | Step::TrustDirectory(_) => false, + }) + } + fn is_auth_in_progress(&self) -> bool { self.steps.iter().any(|step| { matches!(step, Step::Auth(_)) && matches!(step.get_step_state(), StepState::InProgress) @@ -323,6 +328,15 @@ impl KeyboardHandler for OnboardingScreen { impl WidgetRef for &OnboardingScreen { fn render_ref(&self, area: Rect, buf: &mut Buffer) { + let suppress_animations = self.should_suppress_animations(); + for step in self.current_steps() { + match step { + Step::Welcome(widget) => widget.set_animations_suppressed(suppress_animations), + Step::Auth(widget) => widget.set_animations_suppressed(suppress_animations), + Step::TrustDirectory(_) => {} + } + } + Clear.render(area, buf); // Render steps top-to-bottom, measuring each step's height dynamically. let mut y = area.y; diff --git a/codex-rs/tui/src/onboarding/welcome.rs b/codex-rs/tui/src/onboarding/welcome.rs index fa668caabb..acb12abaab 100644 --- a/codex-rs/tui/src/onboarding/welcome.rs +++ b/codex-rs/tui/src/onboarding/welcome.rs @@ -27,6 +27,7 @@ pub(crate) struct WelcomeWidget { pub is_logged_in: bool, animation: AsciiAnimation, animations_enabled: bool, + animations_suppressed: Cell, layout_area: Cell>, } @@ -55,6 +56,7 @@ impl WelcomeWidget { is_logged_in, animation: AsciiAnimation::new(request_frame), animations_enabled, + animations_suppressed: Cell::new(false), layout_area: Cell::new(None), } } @@ -62,18 +64,23 @@ impl WelcomeWidget { pub(crate) fn update_layout_area(&self, area: Rect) { self.layout_area.set(Some(area)); } + + pub(crate) fn set_animations_suppressed(&self, suppressed: bool) { + self.animations_suppressed.set(suppressed); + } } impl WidgetRef for &WelcomeWidget { fn render_ref(&self, area: Rect, buf: &mut Buffer) { Clear.render(area, buf); - if self.animations_enabled { + if self.animations_enabled && 
!self.animations_suppressed.get() { self.animation.schedule_next_frame(); } let layout_area = self.layout_area.get().unwrap_or(area); // Skip the animation entirely when the viewport is too small so we don't clip frames. let show_animation = self.animations_enabled + && !self.animations_suppressed.get() && layout_area.height >= MIN_ANIMATION_HEIGHT && layout_area.width >= MIN_ANIMATION_WIDTH; @@ -167,6 +174,7 @@ mod tests { /*variant_idx*/ 0, ), animations_enabled: true, + animations_suppressed: Cell::new(false), layout_area: Cell::new(None), }; diff --git a/codex-rs/tui/src/public_widgets/mod.rs b/codex-rs/tui/src/public_widgets/mod.rs index c752189f3b..b8db1c6bff 100644 --- a/codex-rs/tui/src/public_widgets/mod.rs +++ b/codex-rs/tui/src/public_widgets/mod.rs @@ -1 +1 @@ -pub mod composer_input; +pub(crate) mod composer_input; diff --git a/codex-rs/tui/src/render/mod.rs b/codex-rs/tui/src/render/mod.rs index a2e4920586..02c19fc9f7 100644 --- a/codex-rs/tui/src/render/mod.rs +++ b/codex-rs/tui/src/render/mod.rs @@ -1,8 +1,8 @@ use ratatui::layout::Rect; -pub mod highlight; -pub mod line_utils; -pub mod renderable; +pub(crate) mod highlight; +pub(crate) mod line_utils; +pub(crate) mod renderable; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct Insets { diff --git a/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__mcp_tools_output_from_statuses_renders_status_only_servers.snap b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__mcp_tools_output_from_statuses_renders_status_only_servers.snap index ee84771027..709ce6d691 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__mcp_tools_output_from_statuses_renders_status_only_servers.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__mcp_tools_output_from_statuses_renders_status_only_servers.snap @@ -10,5 +10,3 @@ expression: rendered • Auth: Unsupported • Command: docs-server --stdio • Tools: lookup - • Resources: (none) - • Resource templates: (none) diff 
--git a/codex-rs/tui/src/status/card.rs b/codex-rs/tui/src/status/card.rs index 781f4ae311..a737262139 100644 --- a/codex-rs/tui/src/status/card.rs +++ b/codex-rs/tui/src/status/card.rs @@ -28,7 +28,6 @@ use super::format::line_display_width; use super::format::push_label; use super::format::truncate_line_to_width; use super::helpers::compose_account_display; -use super::helpers::compose_agents_summary; use super::helpers::compose_model_display; use super::helpers::format_directory_display; use super::helpers::format_tokens_compact; @@ -68,10 +67,20 @@ struct StatusRateLimitState { #[derive(Debug, Clone)] pub(crate) struct StatusHistoryHandle { + agents_summary: Arc>, rate_limit_state: Arc>, } impl StatusHistoryHandle { + pub(crate) fn finish_agents_summary_discovery(&self, agents_summary: String) { + #[expect(clippy::expect_used)] + let mut current = self + .agents_summary + .write() + .expect("status history agents summary state poisoned"); + *current = agents_summary; + } + pub(crate) fn finish_rate_limit_refresh( &self, rate_limits: &[RateLimitSnapshotDisplay], @@ -98,7 +107,7 @@ struct StatusHistoryCell { model_details: Vec, directory: PathBuf, permissions: String, - agents_summary: String, + agents_summary: Arc>, collaboration_mode: Option, model_provider: Option, account: Option, @@ -177,6 +186,7 @@ pub(crate) fn new_status_output_with_rate_limits( model_name, collaboration_mode, reasoning_effort_override, + "".to_string(), refreshing_rate_limits, ) .0 @@ -197,6 +207,7 @@ pub(crate) fn new_status_output_with_rate_limits_handle( model_name: &str, collaboration_mode: Option<&str>, reasoning_effort_override: Option>, + agents_summary: String, refreshing_rate_limits: bool, ) -> (CompositeHistoryCell, StatusHistoryHandle) { let command = PlainHistoryCell::new(vec!["/status".magenta().into()]); @@ -214,6 +225,7 @@ pub(crate) fn new_status_output_with_rate_limits_handle( model_name, collaboration_mode, reasoning_effort_override, + agents_summary, 
refreshing_rate_limits, ); @@ -239,6 +251,7 @@ impl StatusHistoryCell { model_name: &str, collaboration_mode: Option<&str>, reasoning_effort_override: Option>, + agents_summary: String, refreshing_rate_limits: bool, ) -> (Self, StatusHistoryHandle) { let mut config_entries = vec![ @@ -302,7 +315,6 @@ impl StatusHistoryCell { } else { format!("Custom ({sandbox}, {approval})") }; - let agents_summary = compose_agents_summary(config); let model_provider = format_model_provider(config); let account = compose_account_display(account_display); let session_id = session_id.as_ref().map(std::string::ToString::to_string); @@ -333,6 +345,7 @@ impl StatusHistoryCell { rate_limits, refreshing_rate_limits, })); + let agents_summary = Arc::new(RwLock::new(agents_summary)); ( Self { @@ -340,7 +353,6 @@ impl StatusHistoryCell { model_details, directory: config.cwd.to_path_buf(), permissions, - agents_summary, collaboration_mode: collaboration_mode.map(ToString::to_string), model_provider, account, @@ -348,9 +360,13 @@ impl StatusHistoryCell { session_id, forked_from, token_usage, + agents_summary: agents_summary.clone(), rate_limit_state: rate_limit_state.clone(), }, - StatusHistoryHandle { rate_limit_state }, + StatusHistoryHandle { + agents_summary, + rate_limit_state, + }, ) } @@ -564,6 +580,12 @@ impl HistoryCell for StatusHistoryCell { .rate_limit_state .read() .expect("status history rate-limit state poisoned"); + #[expect(clippy::expect_used)] + let agents_summary = self + .agents_summary + .read() + .expect("status history agents summary state poisoned") + .clone(); if self.model_provider.is_some() { push_label(&mut labels, &mut seen, "Model provider"); @@ -625,7 +647,7 @@ impl HistoryCell for StatusHistoryCell { } lines.push(formatter.line("Directory", vec![Span::from(directory_value)])); lines.push(formatter.line("Permissions", vec![Span::from(self.permissions.clone())])); - lines.push(formatter.line("Agents.md", vec![Span::from(self.agents_summary.clone())])); + 
lines.push(formatter.line("Agents.md", vec![Span::from(agents_summary)])); if let Some(account_value) = account_value { lines.push(formatter.line("Account", vec![Span::from(account_value)])); diff --git a/codex-rs/tui/src/status/helpers.rs b/codex-rs/tui/src/status/helpers.rs index 7813094c35..76aaa3da1a 100644 --- a/codex-rs/tui/src/status/helpers.rs +++ b/codex-rs/tui/src/status/helpers.rs @@ -4,8 +4,11 @@ use crate::text_formatting; use chrono::DateTime; use chrono::Local; use codex_core::config::Config; -use codex_core::project_doc::discover_project_doc_paths; +use codex_core::discover_project_doc_paths; +use codex_exec_server::LOCAL_FS; use codex_protocol::account::PlanType; +use codex_utils_absolute_path::AbsolutePathBuf; +use std::io; use std::path::Path; use unicode_width::UnicodeWidthStr; @@ -33,51 +36,52 @@ pub(crate) fn compose_model_display( (model_name.to_string(), details) } -pub(crate) fn compose_agents_summary(config: &Config) -> String { - match discover_project_doc_paths(config) { - Ok(paths) => { - let mut rels: Vec = Vec::new(); - for p in paths { - let file_name = p - .file_name() - .map(|name| name.to_string_lossy().to_string()) - .unwrap_or_else(|| "".to_string()); - let display = if let Some(parent) = p.parent() { - if parent == config.cwd.as_path() { - file_name.clone() - } else { - let mut cur = config.cwd.as_path(); - let mut ups = 0usize; - let mut reached = false; - while let Some(c) = cur.parent() { - if cur == parent { - reached = true; - break; - } - cur = c; - ups += 1; - } - if reached { - let up = format!("..{}", std::path::MAIN_SEPARATOR); - format!("{}{}", up.repeat(ups), file_name) - } else if let Ok(stripped) = p.strip_prefix(&config.cwd) { - normalize_agents_display_path(stripped) - } else { - normalize_agents_display_path(&p) - } - } - } else { - normalize_agents_display_path(&p) - }; - rels.push(display); - } - if rels.is_empty() { - "".to_string() +pub(crate) async fn discover_agents_summary(config: &Config) -> io::Result 
{ + let paths = discover_project_doc_paths(config, LOCAL_FS.as_ref()).await?; + Ok(compose_agents_summary(config, &paths)) +} + +pub(crate) fn compose_agents_summary(config: &Config, paths: &[AbsolutePathBuf]) -> String { + let mut rels: Vec = Vec::new(); + for p in paths { + let file_name = p + .file_name() + .map(|name| name.to_string_lossy().to_string()) + .unwrap_or_else(|| "".to_string()); + let display = if let Some(parent) = p.parent() { + if parent.as_path() == config.cwd.as_path() { + file_name.clone() } else { - rels.join(", ") + let mut cur = config.cwd.as_path(); + let mut ups = 0usize; + let mut reached = false; + while let Some(c) = cur.parent() { + if cur == parent.as_path() { + reached = true; + break; + } + cur = c; + ups += 1; + } + if reached { + let up = format!("..{}", std::path::MAIN_SEPARATOR); + format!("{}{}", up.repeat(ups), file_name) + } else if let Ok(stripped) = p.strip_prefix(&config.cwd) { + normalize_agents_display_path(stripped) + } else { + normalize_agents_display_path(p) + } } - } - Err(_) => "".to_string(), + } else { + normalize_agents_display_path(p) + }; + rels.push(display); + } + + if rels.is_empty() { + "".to_string() + } else { + rels.join(", ") } } diff --git a/codex-rs/tui/src/status/mod.rs b/codex-rs/tui/src/status/mod.rs index e09955a6a6..1781104efd 100644 --- a/codex-rs/tui/src/status/mod.rs +++ b/codex-rs/tui/src/status/mod.rs @@ -19,6 +19,7 @@ pub(crate) use card::new_status_output; #[cfg(test)] pub(crate) use card::new_status_output_with_rate_limits; pub(crate) use card::new_status_output_with_rate_limits_handle; +pub(crate) use helpers::discover_agents_summary; pub(crate) use helpers::format_directory_display; pub(crate) use helpers::format_tokens_compact; pub(crate) use helpers::plan_type_display_name; diff --git a/codex-rs/tui/src/test_support.rs b/codex-rs/tui/src/test_support.rs index 790fc805ea..eb284a5990 100644 --- a/codex-rs/tui/src/test_support.rs +++ b/codex-rs/tui/src/test_support.rs @@ -1,27 +1,6 @@ 
-use codex_utils_absolute_path::AbsolutePathBuf; +pub(crate) use codex_utils_absolute_path::test_support::PathBufExt; +pub(crate) use codex_utils_absolute_path::test_support::PathExt; use std::path::Path; -use std::path::PathBuf; - -pub(crate) trait PathExt { - fn abs(&self) -> AbsolutePathBuf; -} - -impl PathExt for Path { - fn abs(&self) -> AbsolutePathBuf { - AbsolutePathBuf::try_from(self.to_path_buf()) - .unwrap_or_else(|_| panic!("path should already be absolute")) - } -} - -pub(crate) trait PathBufExt { - fn abs(&self) -> AbsolutePathBuf; -} - -impl PathBufExt for PathBuf { - fn abs(&self) -> AbsolutePathBuf { - self.as_path().abs() - } -} pub(crate) fn test_path_display(path: &str) -> String { Path::new(path).abs().display().to_string() diff --git a/codex-rs/tui/tests/suite/vt100_history.rs b/codex-rs/tui/tests/suite/vt100_history.rs index 2b89363e66..609671cc35 100644 --- a/codex-rs/tui/tests/suite/vt100_history.rs +++ b/codex-rs/tui/tests/suite/vt100_history.rs @@ -22,20 +22,20 @@ macro_rules! 
assert_contains { } struct TestScenario { - term: codex_tui::custom_terminal::Terminal, + term: codex_tui::Terminal, } impl TestScenario { fn new(width: u16, height: u16, viewport: Rect) -> Self { let backend = VT100Backend::new(width, height); - let mut term = codex_tui::custom_terminal::Terminal::with_options(backend) - .expect("failed to construct terminal"); + let mut term = + codex_tui::Terminal::with_options(backend).expect("failed to construct terminal"); term.set_viewport_area(viewport); Self { term } } fn run_insert(&mut self, lines: Vec>) { - codex_tui::insert_history::insert_history_lines(&mut self.term, lines) + codex_tui::insert_history_lines(&mut self.term, lines) .expect("Failed to insert history lines in test"); } } diff --git a/codex-rs/tui/tests/suite/vt100_live_commit.rs b/codex-rs/tui/tests/suite/vt100_live_commit.rs index 4c13bb402d..01b3f72578 100644 --- a/codex-rs/tui/tests/suite/vt100_live_commit.rs +++ b/codex-rs/tui/tests/suite/vt100_live_commit.rs @@ -5,7 +5,7 @@ use ratatui::text::Line; #[test] fn live_001_commit_on_overflow() { let backend = VT100Backend::new(/*width*/ 20, /*height*/ 6); - let mut term = match codex_tui::custom_terminal::Terminal::with_options(backend) { + let mut term = match codex_tui::Terminal::with_options(backend) { Ok(t) => t, Err(e) => panic!("failed to construct terminal: {e}"), }; @@ -15,7 +15,7 @@ fn live_001_commit_on_overflow() { term.set_viewport_area(area); // Build 5 explicit rows at width 20. 
- let mut rb = codex_tui::live_wrap::RowBuilder::new(/*target_width*/ 20); + let mut rb = codex_tui::RowBuilder::new(/*target_width*/ 20); rb.push_fragment("one\n"); rb.push_fragment("two\n"); rb.push_fragment("three\n"); @@ -26,7 +26,7 @@ fn live_001_commit_on_overflow() { let commit_rows = rb.drain_commit_ready(/*max_keep*/ 3); let lines: Vec> = commit_rows.into_iter().map(|r| r.text.into()).collect(); - codex_tui::insert_history::insert_history_lines(&mut term, lines) + codex_tui::insert_history_lines(&mut term, lines) .expect("Failed to insert history lines in test"); let screen = term.backend().vt100().screen(); diff --git a/codex-rs/utils/absolute-path/Cargo.toml b/codex-rs/utils/absolute-path/Cargo.toml index f4fa8bf789..801bc9c1f0 100644 --- a/codex-rs/utils/absolute-path/Cargo.toml +++ b/codex-rs/utils/absolute-path/Cargo.toml @@ -10,7 +10,6 @@ workspace = true [dependencies] dirs = { workspace = true } -path-absolutize = { workspace = true } schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } ts-rs = { workspace = true, features = [ diff --git a/codex-rs/utils/absolute-path/src/absolutize.rs b/codex-rs/utils/absolute-path/src/absolutize.rs new file mode 100644 index 0000000000..7965c81f7b --- /dev/null +++ b/codex-rs/utils/absolute-path/src/absolutize.rs @@ -0,0 +1,167 @@ +// Adapted from path-absolutize 3.1.1: +// Copyright (c) 2018 magiclen.org (Ron Li) +// Licensed under the MIT License. +// +// Keep this implementation local so explicit-base normalization can be +// infallible for `AbsolutePathBuf::resolve_path_against_base` and +// `AbsolutePathBuf::join`; only current-working-directory lookup remains +// fallible. 
+ +use std::path::Component; +use std::path::Path; +use std::path::PathBuf; + +pub(super) fn absolutize(path: &Path) -> std::io::Result { + Ok(absolutize_from(path, &std::env::current_dir()?)) +} + +pub(super) fn absolutize_from(path: &Path, base_path: &Path) -> PathBuf { + normalize_path(&path_with_base(path, base_path)) +} + +fn normalize_path(path: &Path) -> PathBuf { + let mut normalized = PathBuf::new(); + for component in path.components() { + match component { + Component::CurDir => {} + Component::ParentDir => { + normalized.pop(); + } + Component::Prefix(_) | Component::RootDir | Component::Normal(_) => { + normalized.push(component.as_os_str()); + } + } + } + + if normalized.as_os_str().is_empty() { + PathBuf::from(".") + } else { + normalized + } +} + +#[cfg(not(windows))] +fn path_with_base(path: &Path, base_path: &Path) -> PathBuf { + if path.is_absolute() { + path.to_path_buf() + } else { + base_path.join(path) + } +} + +#[cfg(windows)] +fn path_with_base(path: &Path, base_path: &Path) -> PathBuf { + if path.is_absolute() || path.has_root() { + return base_path.join(path); + } + + let mut components = path.components(); + let Some(Component::Prefix(prefix)) = components.next() else { + return base_path.join(path); + }; + + let mut path = PathBuf::new(); + path.push(prefix.as_os_str()); + + if components.clone().next().is_none() { + path.push(std::path::MAIN_SEPARATOR_STR); + return path; + } + + let skip_base_prefix = matches!(base_path.components().next(), Some(Component::Prefix(_))); + for component in base_path + .components() + .skip(usize::from(skip_base_prefix)) + .chain(components) + { + path.push(component.as_os_str()); + } + path +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + #[cfg(unix)] + #[test] + fn absolute_path_without_dots_is_unchanged() { + assert_eq!( + absolutize_from(Path::new("/path/to/123/456"), Path::new("/base")), + PathBuf::from("/path/to/123/456") + ); + } + + #[cfg(unix)] + #[test] + 
fn absolute_path_dots_are_removed() { + assert_eq!( + absolutize_from(Path::new("/path/to/./123/../456"), Path::new("/base")), + PathBuf::from("/path/to/456") + ); + } + + #[cfg(unix)] + #[test] + fn relative_path_without_dot_uses_base() { + assert_eq!( + absolutize_from(Path::new("path/to/123/456"), Path::new("/base")), + PathBuf::from("/base/path/to/123/456") + ); + } + + #[cfg(unix)] + #[test] + fn relative_path_with_current_dir_uses_base() { + assert_eq!( + absolutize_from(Path::new("./path/to/123/456"), Path::new("/base")), + PathBuf::from("/base/path/to/123/456") + ); + } + + #[cfg(unix)] + #[test] + fn relative_path_with_parent_dir_uses_base_parent() { + assert_eq!( + absolutize_from(Path::new("../path/to/123/456"), Path::new("/base/cwd")), + PathBuf::from("/base/path/to/123/456") + ); + } + + #[cfg(unix)] + #[test] + fn parent_dir_above_root_stays_at_root() { + assert_eq!( + absolutize_from(Path::new("../../path/to/123/456"), Path::new("/")), + PathBuf::from("/path/to/123/456") + ); + } + + #[cfg(unix)] + #[test] + fn empty_path_uses_base() { + assert_eq!( + absolutize_from(Path::new(""), Path::new("/base/cwd")), + PathBuf::from("/base/cwd") + ); + } + + #[cfg(windows)] + #[test] + fn windows_root_relative_path_uses_base_prefix() { + assert_eq!( + absolutize_from(Path::new(r"\path\to\file"), Path::new(r"C:\base\cwd")), + PathBuf::from(r"C:\path\to\file") + ); + } + + #[cfg(windows)] + #[test] + fn windows_drive_relative_path_uses_path_prefix_and_base_tail() { + assert_eq!( + absolutize_from(Path::new(r"D:path\to\file"), Path::new(r"C:\base\cwd")), + PathBuf::from(r"D:\base\cwd\path\to\file") + ); + } +} diff --git a/codex-rs/utils/absolute-path/src/lib.rs b/codex-rs/utils/absolute-path/src/lib.rs index 0de0f4e023..a09ce8f0cb 100644 --- a/codex-rs/utils/absolute-path/src/lib.rs +++ b/codex-rs/utils/absolute-path/src/lib.rs @@ -1,5 +1,4 @@ use dirs::home_dir; -use path_absolutize::Absolutize; use schemars::JsonSchema; use serde::Deserialize; use 
serde::Deserializer; @@ -11,6 +10,8 @@ use std::path::Path; use std::path::PathBuf; use ts_rs::TS; +mod absolutize; + /// A path that is guaranteed to be absolute and normalized (though it is not /// guaranteed to be canonicalized or exist on the filesystem). /// @@ -43,30 +44,34 @@ impl AbsolutePathBuf { pub fn resolve_path_against_base, B: AsRef>( path: P, base_path: B, - ) -> std::io::Result { + ) -> Self { let expanded = Self::maybe_expand_home_directory(path.as_ref()); - let absolute_path = expanded.absolutize_from(base_path.as_ref())?; - Ok(Self(absolute_path.into_owned())) + Self(absolutize::absolutize_from(&expanded, base_path.as_ref())) } pub fn from_absolute_path>(path: P) -> std::io::Result { let expanded = Self::maybe_expand_home_directory(path.as_ref()); - let absolute_path = expanded.absolutize()?; - Ok(Self(absolute_path.into_owned())) + Ok(Self(absolutize::absolutize(&expanded)?)) } pub fn current_dir() -> std::io::Result { let current_dir = std::env::current_dir()?; - Self::from_absolute_path(current_dir) + Ok(Self(absolutize::absolutize_from( + ¤t_dir, + ¤t_dir, + ))) } /// Construct an absolute path from `path`, resolving relative paths against /// the process current working directory. pub fn relative_to_current_dir>(path: P) -> std::io::Result { - Self::resolve_path_against_base(path, std::env::current_dir()?) + Ok(Self::resolve_path_against_base( + path, + std::env::current_dir()?, + )) } - pub fn join>(&self, path: P) -> std::io::Result { + pub fn join>(&self, path: P) -> Self { Self::resolve_path_against_base(path, &self.0) } @@ -121,6 +126,38 @@ impl From for PathBuf { } } +/// Helpers for constructing absolute paths in tests. +pub mod test_support { + use super::AbsolutePathBuf; + use std::path::Path; + use std::path::PathBuf; + + /// Extension methods for converting paths into [`AbsolutePathBuf`] values in tests. + pub trait PathExt { + /// Converts an already absolute path into an [`AbsolutePathBuf`]. 
+ fn abs(&self) -> AbsolutePathBuf; + } + + impl PathExt for Path { + #[expect(clippy::expect_used)] + fn abs(&self) -> AbsolutePathBuf { + AbsolutePathBuf::try_from(self).expect("path should already be absolute") + } + } + + /// Extension methods for converting path buffers into [`AbsolutePathBuf`] values in tests. + pub trait PathBufExt { + /// Converts an already absolute path buffer into an [`AbsolutePathBuf`]. + fn abs(&self) -> AbsolutePathBuf; + } + + impl PathBufExt for PathBuf { + fn abs(&self) -> AbsolutePathBuf { + self.as_path().abs() + } + } +} + impl TryFrom<&Path> for AbsolutePathBuf { type Error = std::io::Error; @@ -187,9 +224,7 @@ impl<'de> Deserialize<'de> for AbsolutePathBuf { { let path = PathBuf::deserialize(deserializer)?; ABSOLUTE_PATH_BASE.with(|cell| match cell.borrow().as_deref() { - Some(base) => { - Ok(Self::resolve_path_against_base(path, base).map_err(SerdeError::custom)?) - } + Some(base) => Ok(Self::resolve_path_against_base(path, base)), None if path.is_absolute() => { Self::from_absolute_path(path).map_err(SerdeError::custom) } @@ -213,8 +248,7 @@ mod tests { let base_path = base_dir.path(); let absolute_path = absolute_dir.path().join("file.txt"); let abs_path_buf = - AbsolutePathBuf::resolve_path_against_base(absolute_path.clone(), base_path) - .expect("failed to create"); + AbsolutePathBuf::resolve_path_against_base(absolute_path.clone(), base_path); assert_eq!(abs_path_buf.as_path(), absolute_path.as_path()); } @@ -222,8 +256,16 @@ mod tests { fn relative_path_is_resolved_against_base_path() { let temp_dir = tempdir().expect("base dir"); let base_dir = temp_dir.path(); - let abs_path_buf = AbsolutePathBuf::resolve_path_against_base("file.txt", base_dir) - .expect("failed to create"); + let abs_path_buf = AbsolutePathBuf::resolve_path_against_base("file.txt", base_dir); + assert_eq!(abs_path_buf.as_path(), base_dir.join("file.txt").as_path()); + } + + #[test] + fn relative_path_dots_are_normalized_against_base_path() { + let 
temp_dir = tempdir().expect("base dir"); + let base_dir = temp_dir.path(); + let abs_path_buf = + AbsolutePathBuf::resolve_path_against_base("./nested/../file.txt", base_dir); assert_eq!(abs_path_buf.as_path(), base_dir.join("file.txt").as_path()); } diff --git a/codex-rs/utils/cli/src/lib.rs b/codex-rs/utils/cli/src/lib.rs index b3ce23b908..ed00d683f9 100644 --- a/codex-rs/utils/cli/src/lib.rs +++ b/codex-rs/utils/cli/src/lib.rs @@ -1,8 +1,9 @@ mod approval_mode_cli_arg; mod config_override; -pub mod format_env_display; +pub(crate) mod format_env_display; mod sandbox_mode_cli_arg; pub use approval_mode_cli_arg::ApprovalModeCliArg; pub use config_override::CliConfigOverrides; +pub use format_env_display::format_env_display; pub use sandbox_mode_cli_arg::SandboxModeCliArg; diff --git a/codex-rs/utils/image/src/lib.rs b/codex-rs/utils/image/src/lib.rs index b150f76a18..9d491bc107 100644 --- a/codex-rs/utils/image/src/lib.rs +++ b/codex-rs/utils/image/src/lib.rs @@ -2,7 +2,6 @@ use std::num::NonZeroUsize; use std::path::Path; use std::sync::LazyLock; -use crate::error::ImageProcessingError; use base64::Engine; use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; use codex_utils_cache::BlockingLruCache; @@ -21,7 +20,9 @@ pub const MAX_WIDTH: u32 = 2048; /// Maximum height used when resizing images before uploading. pub const MAX_HEIGHT: u32 = 768; -pub mod error; +pub(crate) mod error; + +pub use crate::error::ImageProcessingError; #[derive(Debug, Clone)] pub struct EncodedImage { diff --git a/codex-rs/utils/path-utils/src/lib.rs b/codex-rs/utils/path-utils/src/lib.rs index a6dc6f8b3a..7e44e06d13 100644 --- a/codex-rs/utils/path-utils/src/lib.rs +++ b/codex-rs/utils/path-utils/src/lib.rs @@ -1,6 +1,8 @@ //! Path normalization, symlink resolution, and atomic writes shared across Codex crates. 
-pub mod env; +pub(crate) mod env; +pub use env::is_headless_environment; +pub use env::is_wsl; use codex_utils_absolute_path::AbsolutePathBuf; use std::collections::HashSet; @@ -82,7 +84,7 @@ pub fn resolve_symlink_write_paths(path: &Path) -> io::Result let next = if target.is_absolute() { AbsolutePathBuf::from_absolute_path(&target) } else if let Some(parent) = current.parent() { - AbsolutePathBuf::resolve_path_against_base(&target, parent) + Ok(AbsolutePathBuf::resolve_path_against_base(&target, parent)) } else { return Ok(SymlinkWritePaths { read_path: None, diff --git a/codex-rs/utils/pty/src/win/mod.rs b/codex-rs/utils/pty/src/win/mod.rs index 78f5af2ce7..33b6e52fbd 100644 --- a/codex-rs/utils/pty/src/win/mod.rs +++ b/codex-rs/utils/pty/src/win/mod.rs @@ -44,7 +44,7 @@ use winapi::um::processthreadsapi::*; use winapi::um::synchapi::WaitForSingleObject; use winapi::um::winbase::INFINITE; -pub mod conpty; +pub(crate) mod conpty; mod procthreadattr; mod psuedocon; diff --git a/codex-rs/windows-sandbox-rs/src/elevated/command_runner_win.rs b/codex-rs/windows-sandbox-rs/src/elevated/command_runner_win.rs index 7777055e28..9cb59e18dc 100644 --- a/codex-rs/windows-sandbox-rs/src/elevated/command_runner_win.rs +++ b/codex-rs/windows-sandbox-rs/src/elevated/command_runner_win.rs @@ -12,31 +12,33 @@ use anyhow::Context; use anyhow::Result; +use codex_windows_sandbox::ErrorPayload; +use codex_windows_sandbox::ExitPayload; +use codex_windows_sandbox::FramedMessage; +use codex_windows_sandbox::Message; +use codex_windows_sandbox::OutputPayload; +use codex_windows_sandbox::OutputStream; use codex_windows_sandbox::PipeSpawnHandles; use codex_windows_sandbox::SandboxPolicy; +use codex_windows_sandbox::SpawnReady; +use codex_windows_sandbox::SpawnRequest; use codex_windows_sandbox::StderrMode; use codex_windows_sandbox::StdinMode; use codex_windows_sandbox::allow_null_device; use codex_windows_sandbox::convert_string_sid_to_sid; use 
codex_windows_sandbox::create_readonly_token_with_caps_from; use codex_windows_sandbox::create_workspace_write_token_with_caps_from; +use codex_windows_sandbox::decode_bytes; +use codex_windows_sandbox::encode_bytes; use codex_windows_sandbox::get_current_token_for_restriction; use codex_windows_sandbox::hide_current_user_profile_dir; -use codex_windows_sandbox::ipc_framed::ErrorPayload; -use codex_windows_sandbox::ipc_framed::ExitPayload; -use codex_windows_sandbox::ipc_framed::FramedMessage; -use codex_windows_sandbox::ipc_framed::Message; -use codex_windows_sandbox::ipc_framed::OutputPayload; -use codex_windows_sandbox::ipc_framed::OutputStream; -use codex_windows_sandbox::ipc_framed::decode_bytes; -use codex_windows_sandbox::ipc_framed::encode_bytes; -use codex_windows_sandbox::ipc_framed::read_frame; -use codex_windows_sandbox::ipc_framed::write_frame; use codex_windows_sandbox::log_note; use codex_windows_sandbox::parse_policy; +use codex_windows_sandbox::read_frame; use codex_windows_sandbox::read_handle_loop; use codex_windows_sandbox::spawn_process_with_pipes; use codex_windows_sandbox::to_wide; +use codex_windows_sandbox::write_frame; use std::ffi::c_void; use std::fs::File; use std::os::windows::io::FromRawHandle; @@ -144,9 +146,7 @@ fn send_error(writer: &Arc>, code: &str, message: String) -> Resu } /// Read and validate the initial spawn request frame. -fn read_spawn_request( - reader: &mut File, -) -> Result { +fn read_spawn_request(reader: &mut File) -> Result { let Some(msg) = read_frame(reader)? 
else { anyhow::bail!("runner: pipe closed before spawn_request"); }; @@ -184,9 +184,7 @@ fn effective_cwd(req_cwd: &Path, log_dir: Option<&Path>) -> PathBuf { } } -fn spawn_ipc_process( - req: &codex_windows_sandbox::ipc_framed::SpawnRequest, -) -> Result { +fn spawn_ipc_process(req: &SpawnRequest) -> Result { let log_dir = req.codex_home.clone(); hide_current_user_profile_dir(req.codex_home.as_path()); log_note( @@ -466,7 +464,7 @@ pub fn main() -> Result<()> { let msg = FramedMessage { version: 1, message: Message::SpawnReady { - payload: codex_windows_sandbox::ipc_framed::SpawnReady { + payload: SpawnReady { process_id: unsafe { GetProcessId(pi.hProcess) }, }, }, diff --git a/codex-rs/windows-sandbox-rs/src/lib.rs b/codex-rs/windows-sandbox-rs/src/lib.rs index 9dea9414ba..dd8f23d00f 100644 --- a/codex-rs/windows-sandbox-rs/src/lib.rs +++ b/codex-rs/windows-sandbox-rs/src/lib.rs @@ -34,7 +34,7 @@ mod conpty; #[cfg(target_os = "windows")] #[path = "elevated/ipc_framed.rs"] -pub mod ipc_framed; +pub(crate) mod ipc_framed; #[cfg(target_os = "windows")] #[path = "setup_orchestrator.rs"] @@ -88,6 +88,30 @@ pub use identity::require_logon_sandbox_creds; #[cfg(target_os = "windows")] pub use identity::sandbox_setup_is_complete; #[cfg(target_os = "windows")] +pub use ipc_framed::ErrorPayload; +#[cfg(target_os = "windows")] +pub use ipc_framed::ExitPayload; +#[cfg(target_os = "windows")] +pub use ipc_framed::FramedMessage; +#[cfg(target_os = "windows")] +pub use ipc_framed::Message; +#[cfg(target_os = "windows")] +pub use ipc_framed::OutputPayload; +#[cfg(target_os = "windows")] +pub use ipc_framed::OutputStream; +#[cfg(target_os = "windows")] +pub use ipc_framed::SpawnReady; +#[cfg(target_os = "windows")] +pub use ipc_framed::SpawnRequest; +#[cfg(target_os = "windows")] +pub use ipc_framed::decode_bytes; +#[cfg(target_os = "windows")] +pub use ipc_framed::encode_bytes; +#[cfg(target_os = "windows")] +pub use ipc_framed::read_frame; +#[cfg(target_os = "windows")] +pub use 
ipc_framed::write_frame; +#[cfg(target_os = "windows")] pub use logging::LOG_FILE_NAME; #[cfg(target_os = "windows")] pub use logging::log_note; diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 75d6be358e..4db61293c2 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -3,6 +3,7 @@ exports_files([ "aws-lc-sys_memcmp_check.patch", "aws-lc-sys_windows_msvc_prebuilt_nasm.patch", "aws-lc-sys_windows_msvc_memcmp_probe.patch", + "bzip2_windows_stack_args.patch", "llvm_windows_symlink_extract.patch", "rules_rust_windows_bootstrap_process_wrapper_linker.patch", "rules_rust_windows_build_script_runner_paths.patch", @@ -20,5 +21,6 @@ exports_files([ "v8_module_deps.patch", "v8_source_portability.patch", "windows-link.patch", + "xz_windows_stack_args.patch", "zstd-sys_windows_msvc_include_dirs.patch", ]) diff --git a/patches/bzip2_windows_stack_args.patch b/patches/bzip2_windows_stack_args.patch new file mode 100644 index 0000000000..2786e908be --- /dev/null +++ b/patches/bzip2_windows_stack_args.patch @@ -0,0 +1,23 @@ +diff --git a/BUILD.bazel b/BUILD.bazel +--- a/BUILD.bazel ++++ b/BUILD.bazel +@@ -28,4 +28,11 @@ cc_library( + defines = [ + "_FILE_OFFSET_BITS=64", + ], ++ copts = select({ ++ "@platforms//os:windows": [ ++ "-fno-stack-protector", ++ "-mno-stack-arg-probe", ++ ], ++ "//conditions:default": [], ++ }), + includes = ["."], +diff --git a/MODULE.bazel b/MODULE.bazel +--- a/MODULE.bazel ++++ b/MODULE.bazel +@@ -4,3 +4,4 @@ module( + ) + + bazel_dep(name = "rules_cc", version = "0.0.10") ++bazel_dep(name = "platforms", version = "1.0.0") diff --git a/patches/xz_windows_stack_args.patch b/patches/xz_windows_stack_args.patch new file mode 100644 index 0000000000..926fb0a6dc --- /dev/null +++ b/patches/xz_windows_stack_args.patch @@ -0,0 +1,14 @@ +diff --git a/BUILD.bazel b/BUILD.bazel +--- a/BUILD.bazel ++++ b/BUILD.bazel +@@ -154,6 +154,9 @@ cc_library( + ], + copts = select({ +- "@platforms//os:windows": [], ++ "@platforms//os:windows": [ ++ 
"-fno-stack-protector", ++ "-mno-stack-arg-probe", ++ ], + "//conditions:default": ["-std=c99"], + }), + defines = select({ diff --git a/scripts/test-remote-env.sh b/scripts/test-remote-env.sh index 60fb447832..833be978c3 100755 --- a/scripts/test-remote-env.sh +++ b/scripts/test-remote-env.sh @@ -49,6 +49,10 @@ setup_remote_env() { docker rm -f "${container_name}" >/dev/null 2>&1 || true docker run -d --name "${container_name}" ubuntu:24.04 sleep infinity >/dev/null + if ! docker exec "${container_name}" sh -lc "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python3 zsh"; then + docker rm -f "${container_name}" >/dev/null 2>&1 || true + return 1 + fi export CODEX_TEST_REMOTE_ENV="${container_name}" }