Mirror of https://github.com/openai/codex.git, synced 2026-02-02 06:57:03 +00:00

Compare commits: jif/basic-... and pr9176 (42 commits)
| SHA1 |
|---|
| 904c88da7b |
| 2cd1a0a45e |
| 9f8d3c14ce |
| 89403c5e11 |
| 3c711f3d16 |
| 141d2b5022 |
| ebacd28817 |
| e25d2ab3bf |
| bde734fd1e |
| 58e8f75b27 |
| 2651980bdf |
| 51d75bb80a |
| 57ba758df5 |
| 40e2405998 |
| fe03320791 |
| 2d56519ecd |
| 97f1f20edb |
| 3b8d79ee11 |
| 3a300d1117 |
| 17ab5f6a52 |
| 3c8fb90bf0 |
| 325ce985f1 |
| 18b737910c |
| cbca43d57a |
| e726a82c8a |
| ddae70bd62 |
| d75626ad99 |
| 12779c7c07 |
| 490c1c1fdd |
| 87f7226cca |
| 3a6a43ff5c |
| d7cdcfc302 |
| 5dfa780f3d |
| 3e91a95ce1 |
| 034d489c34 |
| 729e097662 |
| 7ac498e0e0 |
| 45ffcdf886 |
| 06088535ad |
| 4223948cf5 |
| 898e5f82f0 |
| d5562983d9 |
.markdownlint-cli2.yaml (new file, 6)
@@ -0,0 +1,6 @@
+config:
+  MD013:
+    line_length: 100
+
+globs:
+  - "docs/tui-chat-composer.md"
AGENTS.md

@@ -12,7 +12,9 @@ In the codex-rs folder where the rust code lives:
 - Always inline format! args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
 - Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls
 - When writing tests, prefer comparing the equality of entire objects over fields one by one.
+- For multiline string literals, particularly for tests, prefer raw strings (`r#`) to help with readability over string literals full of `\n` metacharacters.
 - When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable.
+- If you change `ConfigToml` or nested config types, run `just write-config-schema` to update `codex-rs/core/config.schema.json`.

 Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
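To make the bullets above concrete, here is a small self-contained sketch (not from the repository) of three of the rules: inlined `format!` args, a method reference instead of a closure, and a raw string for a multiline literal:

```rust
fn main() {
    let name = "codex";
    // Inlined arg: `{name}` instead of format!("{}", name).
    let greeting = format!("hello, {name}");

    // Method reference: `str::trim` instead of the closure |s| s.trim().
    let words: Vec<&str> = ["  a ", " b "].into_iter().map(str::trim).collect();

    // Raw string: a multiline literal with no `\n` escapes.
    let expected = r#"hello, codex
["a", "b"]"#;
    assert_eq!(format!("{greeting}\n{words:?}"), expected);
}
```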
MODULE.bazel.lock (generated, 15)

@@ -409,8 +409,8 @@
"chrono_0.4.42": "{\"dependencies\":[{\"features\":[\"derive\"],\"name\":\"arbitrary\",\"optional\":true,\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"bincode\",\"req\":\"^1.3.0\"},{\"features\":[\"fallback\"],\"name\":\"iana-time-zone\",\"optional\":true,\"req\":\"^0.1.45\",\"target\":\"cfg(unix)\"},{\"name\":\"js-sys\",\"optional\":true,\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"default_features\":false,\"name\":\"num-traits\",\"req\":\"^0.2\"},{\"name\":\"pure-rust-locales\",\"optional\":true,\"req\":\"^0.8\"},{\"default_features\":false,\"name\":\"rkyv\",\"optional\":true,\"req\":\"^0.7.43\"},{\"default_features\":false,\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.99\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"similar-asserts\",\"req\":\"^1.6.1\"},{\"name\":\"wasm-bindgen\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3\",\"target\":\"cfg(all(target_arch = \\\"wasm32\\\", not(any(target_os = \\\"emscripten\\\", target_os = \\\"wasi\\\"))))\"},{\"kind\":\"dev\",\"name\":\"windows-bindgen\",\"req\":\"^0.63\",\"target\":\"cfg(windows)\"},{\"name\":\"windows-link\",\"optional\":true,\"req\":\"^0.2\",\"target\":\"cfg(windows)\"}],\"features\":{\"__internal_bench\":[],\"alloc\":[],\"clock\":[\"winapi\",\"iana-time-zone\",\"now\"],\"core-error\":[],\"default\":[\"clock\",\"std\",\"oldtime\",\"wasmbind\"],\"libc\":[],\"now\":[\"std\"],\"oldtime\":[],\"rkyv\":[\"dep:rkyv\",\"rkyv/size_32\"],\"rkyv-16\":[\"dep:rkyv\",\"rkyv?/size_16\"],\"rkyv-32\":[\"dep:rkyv\",\"rkyv?/size_32\"],\"rkyv-64\":[\"dep:rkyv\",\"rkyv?/size_64\"],\"rkyv-validation\":[\"rkyv?/validation\"],\"std\":[\"alloc\"],\"unstable-locales\":[\"pure-rust-locales\"],\"wasmbind\":[\"wasm-bindgen\",\"js-sys\"],\"winapi\":[\"windows-link\"]}}",
"chunked_transfer_1.5.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3\"}],\"features\":{}}",
"cipher_0.4.4": "{\"dependencies\":[{\"name\":\"blobby\",\"optional\":true,\"req\":\"^0.3\"},{\"name\":\"crypto-common\",\"req\":\"^0.1.6\"},{\"name\":\"inout\",\"req\":\"^0.1\"},{\"default_features\":false,\"name\":\"zeroize\",\"optional\":true,\"req\":\"^1.5\"}],\"features\":{\"alloc\":[],\"block-padding\":[\"inout/block-padding\"],\"dev\":[\"blobby\"],\"rand_core\":[\"crypto-common/rand_core\"],\"std\":[\"alloc\",\"crypto-common/std\",\"inout/std\"]}}",
"clap_4.5.53": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"automod\",\"req\":\"^1.0.14\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"clap-cargo\",\"req\":\"^0.15.0\"},{\"default_features\":false,\"name\":\"clap_builder\",\"req\":\"=4.5.53\"},{\"name\":\"clap_derive\",\"optional\":true,\"req\":\"=4.5.49\"},{\"kind\":\"dev\",\"name\":\"jiff\",\"req\":\"^0.2.3\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.15\"},{\"kind\":\"dev\",\"name\":\"semver\",\"req\":\"^1.0.26\"},{\"kind\":\"dev\",\"name\":\"shlex\",\"req\":\"^1.3.0\"},{\"features\":[\"term-svg\"],\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.16\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.91\"},{\"default_features\":false,\"features\":[\"color-auto\",\"diff\",\"examples\"],\"kind\":\"dev\",\"name\":\"trycmd\",\"req\":\"^0.15.3\"}],\"features\":{\"cargo\":[\"clap_builder/cargo\"],\"color\":[\"clap_builder/color\"],\"debug\":[\"clap_builder/debug\",\"clap_derive?/debug\"],\"default\":[\"std\",\"color\",\"help\",\"usage\",\"error-context\",\"suggestions\"],\"deprecated\":[\"clap_builder/deprecated\",\"clap_derive?/deprecated\"],\"derive\":[\"dep:clap_derive\"],\"env\":[\"clap_builder/env\"],\"error-context\":[\"clap_builder/error-context\"],\"help\":[\"clap_builder/help\"],\"std\":[\"clap_builder/std\"],\"string\":[\"clap_builder/string\"],\"suggestions\":[\"clap_builder/suggestions\"],\"unicode\":[\"clap_builder/unicode\"],\"unstable-derive-ui-tests\":[],\"unstable-doc\":[\"clap_builder/unstable-doc\",\"derive\"],\"unstable-ext\":[\"clap_builder/unstable-ext\"],\"unstable-markdown\":[\"clap_derive/unstable-markdown\"],\"unstable-styles\":[\"clap_builder/unstable-styles\"],\"unstable-v5\":[\"clap_builder/unstable-v5\",\"clap_derive?/unstable-v5\",\"deprecated\"],\"usage\":[\"clap_builder/usage\"],\"wrap_help\":[\"clap_builder/wrap_help\"]}}",
"clap_builder_4.5.53": "{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.7\"},{\"name\":\"anstyle\",\"req\":\"^1.0.8\"},{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.73\"},{\"name\":\"clap_lex\",\"req\":\"^0.7.4\"},{\"kind\":\"dev\",\"name\":\"color-print\",\"req\":\"^0.3.6\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.16\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1.0\"},{\"name\":\"strsim\",\"optional\":true,\"req\":\"^0.11.0\"},{\"name\":\"terminal_size\",\"optional\":true,\"req\":\"^0.4.0\"},{\"kind\":\"dev\",\"name\":\"unic-emoji-char\",\"req\":\"^0.9.0\"},{\"name\":\"unicase\",\"optional\":true,\"req\":\"^2.6.0\"},{\"name\":\"unicode-width\",\"optional\":true,\"req\":\"^0.2.0\"}],\"features\":{\"cargo\":[],\"color\":[\"dep:anstream\"],\"debug\":[\"dep:backtrace\"],\"default\":[\"std\",\"color\",\"help\",\"usage\",\"error-context\",\"suggestions\"],\"deprecated\":[],\"env\":[],\"error-context\":[],\"help\":[],\"std\":[\"anstyle/std\"],\"string\":[],\"suggestions\":[\"dep:strsim\",\"error-context\"],\"unicode\":[\"dep:unicode-width\",\"dep:unicase\"],\"unstable-doc\":[\"cargo\",\"wrap_help\",\"env\",\"unicode\",\"string\",\"unstable-ext\"],\"unstable-ext\":[],\"unstable-styles\":[\"color\"],\"unstable-v5\":[\"deprecated\"],\"usage\":[],\"wrap_help\":[\"help\",\"dep:terminal_size\"]}}",
"clap_4.5.54": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"automod\",\"req\":\"^1.0.14\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"clap-cargo\",\"req\":\"^0.15.0\"},{\"default_features\":false,\"name\":\"clap_builder\",\"req\":\"=4.5.54\"},{\"name\":\"clap_derive\",\"optional\":true,\"req\":\"=4.5.49\"},{\"kind\":\"dev\",\"name\":\"jiff\",\"req\":\"^0.2.3\"},{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.15\"},{\"kind\":\"dev\",\"name\":\"semver\",\"req\":\"^1.0.26\"},{\"kind\":\"dev\",\"name\":\"shlex\",\"req\":\"^1.3.0\"},{\"features\":[\"term-svg\"],\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.16\"},{\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.91\"},{\"default_features\":false,\"features\":[\"color-auto\",\"diff\",\"examples\"],\"kind\":\"dev\",\"name\":\"trycmd\",\"req\":\"^0.15.3\"}],\"features\":{\"cargo\":[\"clap_builder/cargo\"],\"color\":[\"clap_builder/color\"],\"debug\":[\"clap_builder/debug\",\"clap_derive?/debug\"],\"default\":[\"std\",\"color\",\"help\",\"usage\",\"error-context\",\"suggestions\"],\"deprecated\":[\"clap_builder/deprecated\",\"clap_derive?/deprecated\"],\"derive\":[\"dep:clap_derive\"],\"env\":[\"clap_builder/env\"],\"error-context\":[\"clap_builder/error-context\"],\"help\":[\"clap_builder/help\"],\"std\":[\"clap_builder/std\"],\"string\":[\"clap_builder/string\"],\"suggestions\":[\"clap_builder/suggestions\"],\"unicode\":[\"clap_builder/unicode\"],\"unstable-derive-ui-tests\":[],\"unstable-doc\":[\"clap_builder/unstable-doc\",\"derive\"],\"unstable-ext\":[\"clap_builder/unstable-ext\"],\"unstable-markdown\":[\"clap_derive/unstable-markdown\"],\"unstable-styles\":[\"clap_builder/unstable-styles\"],\"unstable-v5\":[\"clap_builder/unstable-v5\",\"clap_derive?/unstable-v5\",\"deprecated\"],\"usage\":[\"clap_builder/usage\"],\"wrap_help\":[\"clap_builder/wrap_help\"]}}",
"clap_builder_4.5.54": "{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.7\"},{\"name\":\"anstyle\",\"req\":\"^1.0.8\"},{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.73\"},{\"name\":\"clap_lex\",\"req\":\"^0.7.4\"},{\"kind\":\"dev\",\"name\":\"color-print\",\"req\":\"^0.3.6\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.16\"},{\"kind\":\"dev\",\"name\":\"static_assertions\",\"req\":\"^1.1.0\"},{\"name\":\"strsim\",\"optional\":true,\"req\":\"^0.11.0\"},{\"name\":\"terminal_size\",\"optional\":true,\"req\":\"^0.4.0\"},{\"kind\":\"dev\",\"name\":\"unic-emoji-char\",\"req\":\"^0.9.0\"},{\"name\":\"unicase\",\"optional\":true,\"req\":\"^2.6.0\"},{\"name\":\"unicode-width\",\"optional\":true,\"req\":\"^0.2.0\"}],\"features\":{\"cargo\":[],\"color\":[\"dep:anstream\"],\"debug\":[\"dep:backtrace\"],\"default\":[\"std\",\"color\",\"help\",\"usage\",\"error-context\",\"suggestions\"],\"deprecated\":[],\"env\":[],\"error-context\":[],\"help\":[],\"std\":[\"anstyle/std\"],\"string\":[],\"suggestions\":[\"dep:strsim\",\"error-context\"],\"unicode\":[\"dep:unicode-width\",\"dep:unicase\"],\"unstable-doc\":[\"cargo\",\"wrap_help\",\"env\",\"unicode\",\"string\",\"unstable-ext\"],\"unstable-ext\":[],\"unstable-styles\":[\"color\"],\"unstable-v5\":[\"deprecated\"],\"usage\":[],\"wrap_help\":[\"help\",\"dep:terminal_size\"]}}",
"clap_complete_4.5.64": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"automod\",\"req\":\"^1.0.14\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"clap\",\"req\":\"^4.5.20\"},{\"default_features\":false,\"features\":[\"std\",\"derive\",\"help\"],\"kind\":\"dev\",\"name\":\"clap\",\"req\":\"^4.5.20\"},{\"name\":\"clap_lex\",\"optional\":true,\"req\":\"^0.7.0\"},{\"name\":\"completest\",\"optional\":true,\"req\":\"^0.4.2\"},{\"name\":\"completest-pty\",\"optional\":true,\"req\":\"^0.5.5\"},{\"name\":\"is_executable\",\"optional\":true,\"req\":\"^1.0.1\"},{\"name\":\"shlex\",\"optional\":true,\"req\":\"^1.3.0\"},{\"features\":[\"diff\",\"dir\",\"examples\"],\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.0\"},{\"default_features\":false,\"features\":[\"color-auto\",\"diff\",\"examples\"],\"kind\":\"dev\",\"name\":\"trycmd\",\"req\":\"^0.15.1\"}],\"features\":{\"debug\":[\"clap/debug\"],\"default\":[],\"unstable-doc\":[\"unstable-dynamic\"],\"unstable-dynamic\":[\"dep:clap_lex\",\"dep:shlex\",\"dep:is_executable\",\"clap/unstable-ext\"],\"unstable-shell-tests\":[\"dep:completest\",\"dep:completest-pty\"]}}",
"clap_derive_4.5.49": "{\"dependencies\":[{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.10\"},{\"name\":\"heck\",\"req\":\"^0.5.0\"},{\"name\":\"proc-macro2\",\"req\":\"^1.0.69\"},{\"default_features\":false,\"name\":\"pulldown-cmark\",\"optional\":true,\"req\":\"^0.13.0\"},{\"name\":\"quote\",\"req\":\"^1.0.9\"},{\"features\":[\"full\"],\"name\":\"syn\",\"req\":\"^2.0.8\"}],\"features\":{\"debug\":[],\"default\":[],\"deprecated\":[],\"raw-deprecated\":[\"deprecated\"],\"unstable-markdown\":[\"dep:pulldown-cmark\",\"dep:anstyle\"],\"unstable-v5\":[\"deprecated\"]}}",
"clap_lex_0.7.5": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"automod\",\"req\":\"^1.0.14\"}],\"features\":{}}",
@@ -495,6 +495,7 @@
"enumflags2_derive_0.7.12": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1.0\"},{\"name\":\"quote\",\"req\":\"^1.0\"},{\"default_features\":false,\"features\":[\"parsing\",\"printing\",\"derive\",\"proc-macro\"],\"name\":\"syn\",\"req\":\"^2.0\"}],\"features\":{}}",
"env-flags_0.1.1": "{\"dependencies\":[],\"features\":{}}",
"env_filter_0.1.3": "{\"dependencies\":[{\"features\":[\"std\"],\"name\":\"log\",\"req\":\"^0.4.8\"},{\"default_features\":false,\"features\":[\"std\",\"perf\"],\"name\":\"regex\",\"optional\":true,\"req\":\"^1.0.3\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6\"}],\"features\":{\"default\":[\"regex\"],\"regex\":[\"dep:regex\"]}}",
"env_home_0.1.0": "{\"dependencies\":[],\"features\":{}}",
"env_logger_0.11.8": "{\"dependencies\":[{\"default_features\":false,\"features\":[\"wincon\"],\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.11\"},{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.6\"},{\"default_features\":false,\"name\":\"env_filter\",\"req\":\"^0.1.0\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"jiff\",\"optional\":true,\"req\":\"^0.2.3\"},{\"features\":[\"std\"],\"name\":\"log\",\"req\":\"^0.4.21\"}],\"features\":{\"auto-color\":[\"color\",\"anstream/auto\"],\"color\":[\"dep:anstream\",\"dep:anstyle\"],\"default\":[\"auto-color\",\"humantime\",\"regex\"],\"humantime\":[\"dep:jiff\"],\"kv\":[\"log/kv\"],\"regex\":[\"env_filter/regex\"],\"unstable-kv\":[\"kv\"]}}",
"equivalent_1.0.2": "{\"dependencies\":[],\"features\":{}}",
"erased-serde_0.3.31": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"rustversion\",\"req\":\"^1.0.13\"},{\"default_features\":false,\"name\":\"serde\",\"req\":\"^1.0.166\"},{\"kind\":\"dev\",\"name\":\"serde_cbor\",\"req\":\"^0.11.2\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0.166\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.99\"},{\"features\":[\"diff\"],\"kind\":\"dev\",\"name\":\"trybuild\",\"req\":\"^1.0.83\"}],\"features\":{\"alloc\":[\"serde/alloc\"],\"default\":[\"std\"],\"std\":[\"serde/std\"],\"unstable-debug\":[]}}",
@@ -905,7 +906,7 @@
"tokio-rustls_0.26.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"argh\",\"req\":\"^0.1.1\"},{\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3.1\"},{\"kind\":\"dev\",\"name\":\"lazy_static\",\"req\":\"^1.1\"},{\"features\":[\"pem\"],\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.13\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"rustls\",\"req\":\"^0.23.22\"},{\"name\":\"tokio\",\"req\":\"^1.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"webpki-roots\",\"req\":\"^0.26\"}],\"features\":{\"aws-lc-rs\":[\"aws_lc_rs\"],\"aws_lc_rs\":[\"rustls/aws_lc_rs\"],\"default\":[\"logging\",\"tls12\",\"aws_lc_rs\"],\"early-data\":[],\"fips\":[\"rustls/fips\"],\"logging\":[\"rustls/logging\"],\"ring\":[\"rustls/ring\"],\"tls12\":[\"rustls/tls12\"]}}",
"tokio-stream_0.1.18": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"req\":\"^1.15.0\"},{\"features\":[\"full\",\"test-util\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4\"},{\"name\":\"tokio-util\",\"optional\":true,\"req\":\"^0.7.0\"}],\"features\":{\"default\":[\"time\"],\"fs\":[\"tokio/fs\"],\"full\":[\"time\",\"net\",\"io-util\",\"fs\",\"sync\",\"signal\"],\"io-util\":[\"tokio/io-util\"],\"net\":[\"tokio/net\"],\"signal\":[\"tokio/signal\"],\"sync\":[\"tokio/sync\",\"tokio-util\"],\"time\":[\"tokio/time\"]}}",
"tokio-test_0.4.4": "{\"dependencies\":[{\"name\":\"async-stream\",\"req\":\"^0.3.3\"},{\"name\":\"bytes\",\"req\":\"^1.0.0\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-util\",\"req\":\"^0.3.0\"},{\"features\":[\"rt\",\"sync\",\"time\",\"test-util\"],\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.2.0\"},{\"name\":\"tokio-stream\",\"req\":\"^0.1.1\"}],\"features\":{}}",
"tokio-util_0.7.16": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3.0\"},{\"name\":\"bytes\",\"req\":\"^1.5.0\"},{\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"name\":\"futures-io\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"futures-sink\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-test\",\"req\":\"^0.3.5\"},{\"name\":\"futures-util\",\"optional\":true,\"req\":\"^0.3.0\"},{\"default_features\":false,\"name\":\"hashbrown\",\"optional\":true,\"req\":\"^0.15.0\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.4\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"req\":\"^1.28.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\"}],\"features\":{\"__docs_rs\":[\"futures-util\"],\"codec\":[],\"compat\":[\"futures-io\"],\"default\":[],\"full\":[\"codec\",\"compat\",\"io-util\",\"time\",\"net\",\"rt\",\"join-map\"],\"io\":[],\"io-util\":[\"io\",\"tokio/rt\",\"tokio/io-util\"],\"join-map\":[\"rt\",\"hashbrown\"],\"net\":[\"tokio/net\"],\"rt\":[\"tokio/rt\",\"tokio/sync\",\"futures-util\"],\"time\":[\"tokio/time\",\"slab\"]}}",
"tokio-util_0.7.18": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3.0\"},{\"name\":\"bytes\",\"req\":\"^1.5.0\"},{\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"name\":\"futures-core\",\"req\":\"^0.3.0\"},{\"name\":\"futures-io\",\"optional\":true,\"req\":\"^0.3.0\"},{\"name\":\"futures-sink\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-test\",\"req\":\"^0.3.5\"},{\"name\":\"futures-util\",\"optional\":true,\"req\":\"^0.3.0\"},{\"default_features\":false,\"name\":\"hashbrown\",\"optional\":true,\"req\":\"^0.15.0\"},{\"features\":[\"futures\",\"checkpoint\"],\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"kind\":\"dev\",\"name\":\"parking_lot\",\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.4\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"req\":\"^1.44.0\"},{\"features\":[\"full\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\"}],\"features\":{\"__docs_rs\":[\"futures-util\"],\"codec\":[],\"compat\":[\"futures-io\"],\"default\":[],\"full\":[\"codec\",\"compat\",\"io-util\",\"time\",\"net\",\"rt\",\"join-map\"],\"io\":[],\"io-util\":[\"io\",\"tokio/rt\",\"tokio/io-util\"],\"join-map\":[\"rt\",\"hashbrown\"],\"net\":[\"tokio/net\"],\"rt\":[\"tokio/rt\",\"tokio/sync\",\"futures-util\"],\"time\":[\"tokio/time\",\"slab\"]}}",
"tokio_1.48.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"async-stream\",\"req\":\"^0.3\"},{\"name\":\"backtrace\",\"optional\":true,\"req\":\"^0.3.58\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1.2.1\"},{\"features\":[\"async-await\"],\"kind\":\"dev\",\"name\":\"futures\",\"req\":\"^0.3.0\"},{\"kind\":\"dev\",\"name\":\"futures-concurrency\",\"req\":\"^7.6.3\"},{\"default_features\":false,\"name\":\"io-uring\",\"optional\":true,\"req\":\"^0.7.6\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"name\":\"libc\",\"optional\":true,\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"kind\":\"dev\",\"name\":\"libc\",\"req\":\"^0.2.168\",\"target\":\"cfg(unix)\"},{\"features\":[\"futures\",\"checkpoint\"],\"kind\":\"dev\",\"name\":\"loom\",\"req\":\"^0.7\",\"target\":\"cfg(loom)\"},{\"default_features\":false,\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\"},{\"default_features\":false,\"features\":[\"os-poll\",\"os-ext\"],\"name\":\"mio\",\"optional\":true,\"req\":\"^1.0.1\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"tokio\"],\"kind\":\"dev\",\"name\":\"mio-aio\",\"req\":\"^1\",\"target\":\"cfg(target_os = \\\"freebsd\\\")\"},{\"kind\":\"dev\",\"name\":\"mockall\",\"req\":\"^0.13.0\"},{\"default_features\":false,\"features\":[\"aio\",\"fs\",\"socket\"],\"kind\":\"dev\",\"name\":\"nix\",\"req\":\"^0.29.0\",\"target\":\"cfg(unix)\"},{\"name\":\"parking_lot\",\"optional\":true,\"req\":\"^0.12.0\"},{\"name\":\"pin-project-lite\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"proptest\",\"req\":\"^1\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.9\",\"target\":\"cfg(not(all(target_family = \\\"wasm\\\", target_os = \\\"unknown\\\")))\"},{\"name\":\"signal-hook-registry\",\"optional\":true,\"req\":\"^1.1.1\",\"target\":\"cfg(unix)\"},{\"name\":\"slab\",\"optional\":true,\"req\":\"^0.4.9\",\"target\":\"cfg(all(tokio_unstable, target_os = \\\"linux\\\"))\"},{\"features\":[\"all\"],\"name\":\"socket2\",\"optional\":true,\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"socket2\",\"req\":\"^0.6.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.1.0\",\"target\":\"cfg(not(target_family = \\\"wasm\\\"))\"},{\"name\":\"tokio-macros\",\"optional\":true,\"req\":\"~2.6.0\"},{\"kind\":\"dev\",\"name\":\"tokio-stream\",\"req\":\"^0.1\"},{\"kind\":\"dev\",\"name\":\"tokio-test\",\"req\":\"^0.4.0\"},{\"features\":[\"rt\"],\"kind\":\"dev\",\"name\":\"tokio-util\",\"req\":\"^0.7\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.29\",\"target\":\"cfg(tokio_unstable)\"},{\"kind\":\"dev\",\"name\":\"tracing-mock\",\"req\":\"=0.1.0-beta.1\",\"target\":\"cfg(all(tokio_unstable, target_has_atomic = \\\"64\\\"))\"},{\"kind\":\"dev\",\"name\":\"wasm-bindgen-test\",\"req\":\"^0.3.0\",\"target\":\"cfg(all(target_family = \\\"wasm\\\", not(target_os = 
\\\"wasi\\\")))\"},{\"name\":\"windows-sys\",\"optional\":true,\"req\":\"^0.61\",\"target\":\"cfg(windows)\"},{\"features\":[\"Win32_Foundation\",\"Win32_Security_Authorization\"],\"kind\":\"dev\",\"name\":\"windows-sys\",\"req\":\"^0.61\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[],\"fs\":[],\"full\":[\"fs\",\"io-util\",\"io-std\",\"macros\",\"net\",\"parking_lot\",\"process\",\"rt\",\"rt-multi-thread\",\"signal\",\"sync\",\"time\"],\"io-std\":[],\"io-uring\":[\"dep:io-uring\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"dep:slab\"],\"io-util\":[\"bytes\"],\"macros\":[\"tokio-macros\"],\"net\":[\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"socket2\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_Security\",\"windows-sys/Win32_Storage_FileSystem\",\"windows-sys/Win32_System_Pipes\",\"windows-sys/Win32_System_SystemServices\"],\"process\":[\"bytes\",\"libc\",\"mio/os-poll\",\"mio/os-ext\",\"mio/net\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Threading\",\"windows-sys/Win32_System_WindowsProgramming\"],\"rt\":[],\"rt-multi-thread\":[\"rt\"],\"signal\":[\"libc\",\"mio/os-poll\",\"mio/net\",\"mio/os-ext\",\"signal-hook-registry\",\"windows-sys/Win32_Foundation\",\"windows-sys/Win32_System_Console\"],\"sync\":[],\"taskdump\":[\"dep:backtrace\"],\"test-util\":[\"rt\",\"sync\",\"time\"],\"time\":[]}}",
"toml_0.5.11": "{\"dependencies\":[{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^1.0\"},{\"name\":\"serde\",\"req\":\"^1.0.97\"},{\"kind\":\"dev\",\"name\":\"serde_derive\",\"req\":\"^1.0\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0\"}],\"features\":{\"default\":[],\"preserve_order\":[\"indexmap\"]}}",
"toml_0.9.5": "{\"dependencies\":[{\"name\":\"anstream\",\"optional\":true,\"req\":\"^0.6.15\"},{\"name\":\"anstyle\",\"optional\":true,\"req\":\"^1.0.8\"},{\"default_features\":false,\"name\":\"foldhash\",\"optional\":true,\"req\":\"^0.1.5\"},{\"default_features\":false,\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2.3.0\"},{\"kind\":\"dev\",\"name\":\"itertools\",\"req\":\"^0.14.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0.145\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0.199\"},{\"kind\":\"dev\",\"name\":\"serde-untagged\",\"req\":\"^0.1.7\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1.0.116\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"serde_spanned\",\"req\":\"^1.0.0\"},{\"kind\":\"dev\",\"name\":\"snapbox\",\"req\":\"^0.6.0\"},{\"kind\":\"dev\",\"name\":\"toml-test-data\",\"req\":\"^2.3.0\"},{\"features\":[\"snapshot\"],\"kind\":\"dev\",\"name\":\"toml-test-harness\",\"req\":\"^1.3.2\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_datetime\",\"req\":\"^0.7.0\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_parser\",\"optional\":true,\"req\":\"^1.0.2\"},{\"default_features\":false,\"features\":[\"alloc\"],\"name\":\"toml_writer\",\"optional\":true,\"req\":\"^1.0.2\"},{\"kind\":\"dev\",\"name\":\"walkdir\",\"req\":\"^2.5.0\"},{\"default_features\":false,\"name\":\"winnow\",\"optional\":true,\"req\":\"^0.7.10\"}],\"features\":{\"debug\":[\"std\",\"toml_parser?/debug\",\"dep:anstream\",\"dep:anstyle\"],\"default\":[\"std\",\"serde\",\"parse\",\"display\"],\"display\":[\"dep:toml_writer\"],\"fast_hash\":[\"preserve_order\",\"dep:foldhash\"],\"parse\":[\"dep:toml_parser\",\"dep:winnow\"],\"preserve_order\":[\"dep:indexmap\",\"std\"],\"serde\":[\"dep:serde\",\"toml_datetime/serde\",\"serde_spanned/serde\"],\"std\":[\"indexmap?/std\",\"serde?/std\",\"toml_parser?/std\",\"toml_writer?/std\",\"toml_datetime/std\",\"serde_spanned/std\"],\"unbounded\":[]}}",
@@ -936,9 +937,9 @@
"tree-sitter_0.25.10": "{\"dependencies\":[{\"kind\":\"build\",\"name\":\"bindgen\",\"optional\":true,\"req\":\"^0.71.1\"},{\"kind\":\"build\",\"name\":\"cc\",\"req\":\"^1.2.10\"},{\"default_features\":false,\"features\":[\"unicode\"],\"name\":\"regex\",\"req\":\"^1.11.1\"},{\"default_features\":false,\"name\":\"regex-syntax\",\"req\":\"^0.8.5\"},{\"features\":[\"preserve_order\"],\"kind\":\"build\",\"name\":\"serde_json\",\"req\":\"^1.0.137\"},{\"name\":\"streaming-iterator\",\"req\":\"^0.1.9\"},{\"name\":\"tree-sitter-language\",\"req\":\"^0.1\"},{\"default_features\":false,\"features\":[\"cranelift\",\"gc-drc\"],\"name\":\"wasmtime-c-api\",\"optional\":true,\"package\":\"wasmtime-c-api-impl\",\"req\":\"^29.0.1\"}],\"features\":{\"default\":[\"std\"],\"std\":[\"regex/std\",\"regex/perf\",\"regex-syntax/unicode\"],\"wasm\":[\"std\",\"wasmtime-c-api\"]}}",
"tree_magic_mini_3.2.0": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"bencher\",\"req\":\"^0.1.0\"},{\"name\":\"memchr\",\"req\":\"^2.0\"},{\"name\":\"nom\",\"req\":\"^7.0\"},{\"name\":\"once_cell\",\"req\":\"^1.0\"},{\"name\":\"petgraph\",\"req\":\"^0.6.0\"},{\"name\":\"tree_magic_db\",\"optional\":true,\"req\":\"^3.0\"}],\"features\":{\"with-gpl-data\":[\"dep:tree_magic_db\"]}}",
"try-lock_0.2.5": "{\"dependencies\":[],\"features\":{}}",
"ts-rs-macros_11.0.1": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"features\":[\"full\",\"extra-traits\"],\"name\":\"syn\",\"req\":\"^2.0.28\"},{\"name\":\"termcolor\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"no-serde-warnings\":[],\"serde-compat\":[\"termcolor\"]}}",
"ts-rs_11.0.1": "{\"dependencies\":[{\"features\":[\"serde\"],\"name\":\"bigdecimal\",\"optional\":true,\"req\":\">=0.0.13, <0.5\"},{\"name\":\"bson\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"chrono\",\"optional\":true,\"req\":\"^0.4\"},{\"features\":[\"serde\"],\"kind\":\"dev\",\"name\":\"chrono\",\"req\":\"^0.4\"},{\"name\":\"dprint-plugin-typescript\",\"optional\":true,\"req\":\"^0.90\"},{\"name\":\"heapless\",\"optional\":true,\"req\":\">=0.7, <0.9\"},{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"ordered-float\",\"optional\":true,\"req\":\">=3, <6\"},{\"name\":\"semver\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"name\":\"serde_json\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"name\":\"smol_str\",\"optional\":true,\"req\":\"^0.3\"},{\"name\":\"thiserror\",\"req\":\"^2\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"sync\",\"rt\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.40\"},{\"name\":\"ts-rs-macros\",\"req\":\"=11.0.1\"},{\"name\":\"url\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"uuid\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"bigdecimal-impl\":[\"bigdecimal\"],\"bson-uuid-impl\":[\"bson\"],\"bytes-impl\":[\"bytes\"],\"chrono-impl\":[\"chrono\"],\"default\":[\"serde-compat\"],\"format\":[\"dprint-plugin-typescript\"],\"heapless-impl\":[\"heapless\"],\"import-esm\":[],\"indexmap-impl\":[\"indexmap\"],\"no-serde-warnings\":[\"ts-rs-macros/no-serde-warnings\"],\"ordered-float-impl\":[\"ordered-float\"],\"semver-impl\":[\"semver\"],\"serde-compat\":[\"ts-rs-macros/serde-compat\"],\"serde-json-impl\":[\"serde_json\"],\"smol_str-impl\":[\"smol_str\"],\"tokio-impl\":[\"tokio\"],\"url-impl\":[\"url\"],\"uuid-impl\":[\"uuid\"]}}",
"tui-scrollbar_0.2.1": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"color-eyre\",\"req\":\"^0.6\"},{\"name\":\"crossterm\",\"optional\":true,\"req\":\"^0.29\"},{\"name\":\"document-features\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"ratatui\",\"req\":\"^0.30.0\"},{\"name\":\"ratatui-core\",\"req\":\"^0.1\"}],\"features\":{\"crossterm\":[\"dep:crossterm\"]}}",
"ts-rs-macros_11.1.0": "{\"dependencies\":[{\"name\":\"proc-macro2\",\"req\":\"^1\"},{\"name\":\"quote\",\"req\":\"^1\"},{\"features\":[\"full\",\"extra-traits\"],\"name\":\"syn\",\"req\":\"^2.0.28\"},{\"name\":\"termcolor\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"no-serde-warnings\":[],\"serde-compat\":[\"termcolor\"]}}",
"ts-rs_11.1.0": "{\"dependencies\":[{\"features\":[\"serde\"],\"name\":\"bigdecimal\",\"optional\":true,\"req\":\">=0.0.13, <0.5\"},{\"name\":\"bson\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"bytes\",\"optional\":true,\"req\":\"^1\"},{\"name\":\"chrono\",\"optional\":true,\"req\":\"^0.4\"},{\"features\":[\"serde\"],\"kind\":\"dev\",\"name\":\"chrono\",\"req\":\"^0.4\"},{\"name\":\"dprint-plugin-typescript\",\"optional\":true,\"req\":\"=0.95\"},{\"name\":\"heapless\",\"optional\":true,\"req\":\">=0.7, <0.9\"},{\"name\":\"indexmap\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"ordered-float\",\"optional\":true,\"req\":\">=3, <6\"},{\"name\":\"semver\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"derive\"],\"kind\":\"dev\",\"name\":\"serde\",\"req\":\"^1.0\"},{\"name\":\"serde_json\",\"optional\":true,\"req\":\"^1\"},{\"kind\":\"dev\",\"name\":\"serde_json\",\"req\":\"^1\"},{\"name\":\"smol_str\",\"optional\":true,\"req\":\"^0.3\"},{\"name\":\"thiserror\",\"req\":\"^2\"},{\"features\":[\"sync\"],\"name\":\"tokio\",\"optional\":true,\"req\":\"^1\"},{\"features\":[\"sync\",\"rt\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1.40\"},{\"name\":\"ts-rs-macros\",\"req\":\"=11.1.0\"},{\"name\":\"url\",\"optional\":true,\"req\":\"^2\"},{\"name\":\"uuid\",\"optional\":true,\"req\":\"^1\"}],\"features\":{\"bigdecimal-impl\":[\"bigdecimal\"],\"bson-uuid-impl\":[\"bson\"],\"bytes-impl\":[\"bytes\"],\"chrono-impl\":[\"chrono\"],\"default\":[\"serde-compat\"],\"format\":[\"dprint-plugin-typescript\"],\"heapless-impl\":[\"heapless\"],\"import-esm\":[],\"indexmap-impl\":[\"indexmap\"],\"no-serde-warnings\":[\"ts-rs-macros/no-serde-warnings\"],\"ordered-float-impl\":[\"ordered-float\"],\"semver-impl\":[\"semver\"],\"serde-compat\":[\"ts-rs-macros/serde-compat\"],\"serde-json-impl\":[\"serde_json\"],\"smol_str-impl\":[\"smol_str\"],\"tokio-impl\":[\"tokio\"],\"url-impl\":[\"url\"],\"uuid-impl\":[\"uuid\"]}}",
"tui-scrollbar_0.2.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"color-eyre\",\"req\":\"^0.6\"},{\"name\":\"crossterm_0_28\",\"optional\":true,\"package\":\"crossterm\",\"req\":\"^0.28\"},{\"name\":\"crossterm_0_29\",\"optional\":true,\"package\":\"crossterm\",\"req\":\"^0.29\"},{\"name\":\"document-features\",\"req\":\"^0.2.11\"},{\"kind\":\"dev\",\"name\":\"ratatui\",\"req\":\"^0.30.0\"},{\"name\":\"ratatui-core\",\"req\":\"^0.1\"}],\"features\":{\"crossterm\":[\"crossterm_0_29\"],\"crossterm_0_28\":[\"dep:crossterm_0_28\"],\"crossterm_0_29\":[\"dep:crossterm_0_29\"],\"default\":[]}}",
"typenum_1.18.0": "{\"dependencies\":[{\"default_features\":false,\"name\":\"scale-info\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"const-generics\":[],\"force_unix_path_separator\":[],\"i128\":[],\"no_std\":[],\"scale_info\":[\"scale-info/derive\"],\"strict\":[]}}",
"uds_windows_1.1.0": "{\"dependencies\":[{\"name\":\"memoffset\",\"req\":\"^0.9.0\"},{\"name\":\"tempfile\",\"req\":\"^3\",\"target\":\"cfg(windows)\"},{\"features\":[\"winsock2\",\"ws2def\",\"minwinbase\",\"ntdef\",\"processthreadsapi\",\"handleapi\",\"ws2tcpip\",\"winbase\"],\"name\":\"winapi\",\"req\":\"^0.3.9\",\"target\":\"cfg(windows)\"}],\"features\":{}}",
"uname_0.1.1": "{\"dependencies\":[{\"name\":\"libc\",\"req\":\"^0.2\"}],\"features\":{}}",
@@ -991,7 +992,7 @@
"webpki-root-certs_1.0.4": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4.3\"},{\"kind\":\"dev\",\"name\":\"percent-encoding\",\"req\":\"^2.3\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.8\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17.0\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.103\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17.0\"}],\"features\":{}}",
"webpki-roots_1.0.2": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"hex\",\"req\":\"^0.4.3\"},{\"kind\":\"dev\",\"name\":\"percent-encoding\",\"req\":\"^2.3\"},{\"default_features\":false,\"name\":\"pki-types\",\"package\":\"rustls-pki-types\",\"req\":\"^1.8\"},{\"kind\":\"dev\",\"name\":\"rcgen\",\"req\":\"^0.14\"},{\"kind\":\"dev\",\"name\":\"ring\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"rustls\",\"req\":\"^0.23\"},{\"features\":[\"macros\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"features\":[\"alloc\"],\"kind\":\"dev\",\"name\":\"webpki\",\"package\":\"rustls-webpki\",\"req\":\"^0.103\"},{\"kind\":\"dev\",\"name\":\"x509-parser\",\"req\":\"^0.17.0\"},{\"kind\":\"dev\",\"name\":\"yasna\",\"req\":\"^0.5.2\"}],\"features\":{}}",
"weezl_0.1.10": "{\"dependencies\":[{\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"features\":[\"std\"],\"name\":\"futures\",\"optional\":true,\"req\":\"^0.3.12\"},{\"default_features\":false,\"features\":[\"macros\",\"io-util\",\"net\",\"rt\",\"rt-multi-thread\"],\"kind\":\"dev\",\"name\":\"tokio\",\"req\":\"^1\"},{\"default_features\":false,\"features\":[\"compat\"],\"kind\":\"dev\",\"name\":\"tokio-util\",\"req\":\"^0.6.2\"}],\"features\":{\"alloc\":[],\"async\":[\"futures\",\"std\"],\"default\":[\"std\"],\"std\":[\"alloc\"]}}",
"which_6.0.3": "{\"dependencies\":[{\"name\":\"either\",\"req\":\"^1.9.0\"},{\"name\":\"home\",\"req\":\"^0.5.9\",\"target\":\"cfg(any(windows, unix, target_os = \\\"redox\\\"))\"},{\"name\":\"regex\",\"optional\":true,\"req\":\"^1.10.2\"},{\"default_features\":false,\"features\":[\"fs\",\"std\"],\"name\":\"rustix\",\"req\":\"^0.38.30\",\"target\":\"cfg(any(unix, target_os = \\\"wasi\\\", target_os = \\\"redox\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.9.0\"},{\"default_features\":false,\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.40\"},{\"features\":[\"kernel\"],\"name\":\"winsafe\",\"req\":\"^0.0.19\",\"target\":\"cfg(windows)\"}],\"features\":{\"regex\":[\"dep:regex\"],\"tracing\":[\"dep:tracing\"]}}",
"which_8.0.0": "{\"dependencies\":[{\"name\":\"env_home\",\"optional\":true,\"req\":\"^0.1.0\",\"target\":\"cfg(any(windows, unix, target_os = \\\"redox\\\"))\"},{\"name\":\"regex\",\"optional\":true,\"req\":\"^1.10.2\"},{\"default_features\":false,\"features\":[\"fs\",\"std\"],\"name\":\"rustix\",\"optional\":true,\"req\":\"^1.0.5\",\"target\":\"cfg(any(unix, target_os = \\\"wasi\\\", target_os = \\\"redox\\\"))\"},{\"kind\":\"dev\",\"name\":\"tempfile\",\"req\":\"^3.9.0\"},{\"default_features\":false,\"name\":\"tracing\",\"optional\":true,\"req\":\"^0.1.40\"},{\"features\":[\"kernel\"],\"name\":\"winsafe\",\"optional\":true,\"req\":\"^0.0.19\",\"target\":\"cfg(windows)\"}],\"features\":{\"default\":[\"real-sys\"],\"real-sys\":[\"dep:env_home\",\"dep:rustix\",\"dep:winsafe\"],\"regex\":[\"dep:regex\"],\"tracing\":[\"dep:tracing\"]}}",
"wildmatch_2.6.1": "{\"dependencies\":[{\"default_features\":false,\"kind\":\"dev\",\"name\":\"criterion\",\"req\":\"^0.5.1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"glob\",\"req\":\"^0.3.1\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"ntest\",\"req\":\"^0.9.0\"},{\"kind\":\"dev\",\"name\":\"rand\",\"req\":\"^0.8.5\"},{\"default_features\":false,\"kind\":\"dev\",\"name\":\"regex\",\"req\":\"^1.10.2\"},{\"kind\":\"dev\",\"name\":\"regex-lite\",\"req\":\"^0.1.5\"},{\"default_features\":false,\"features\":[\"derive\"],\"name\":\"serde\",\"optional\":true,\"req\":\"^1.0\"}],\"features\":{\"serde\":[\"dep:serde\"]}}",
"winapi-i686-pc-windows-gnu_0.4.0": "{\"dependencies\":[],\"features\":{}}",
"winapi-util_0.1.9": "{\"dependencies\":[{\"features\":[\"Win32_Foundation\",\"Win32_Storage_FileSystem\",\"Win32_System_Console\",\"Win32_System_SystemInformation\"],\"name\":\"windows-sys\",\"req\":\">=0.48.0, <=0.59\",\"target\":\"cfg(windows)\"}],\"features\":{}}",
codex-rs/Cargo.lock (generated, 104)
@@ -360,7 +360,7 @@ dependencies = [
  "objc2-foundation",
  "parking_lot",
  "percent-encoding",
- "windows-sys 0.60.2",
+ "windows-sys 0.52.0",
  "wl-clipboard-rs",
  "x11rb",
 ]
@@ -891,9 +891,9 @@ dependencies = [

 [[package]]
 name = "clap"
-version = "4.5.53"
+version = "4.5.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
+checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -901,9 +901,9 @@ dependencies = [

 [[package]]
 name = "clap_builder"
-version = "4.5.53"
+version = "4.5.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
+checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
 dependencies = [
  "anstream",
  "anstyle",
@@ -984,8 +984,10 @@ dependencies = [
  "thiserror 2.0.17",
  "tokio",
  "tokio-test",
+ "tokio-tungstenite",
  "tokio-util",
  "tracing",
  "url",
+ "wiremock",
 ]
@@ -1273,6 +1275,7 @@ dependencies = [
  "base64",
  "chardetng",
  "chrono",
+ "clap",
  "codex-api",
  "codex-app-server-protocol",
  "codex-apply-patch",
@@ -1306,6 +1309,7 @@ dependencies = [
  "image",
  "include_dir",
  "indexmap 2.12.0",
+ "indoc",
  "keyring",
  "landlock",
  "libc",
@@ -1320,6 +1324,7 @@ dependencies = [
  "regex",
  "regex-lite",
  "reqwest",
+ "schemars 0.8.22",
  "seccompiler",
  "serde",
  "serde_json",
@@ -1596,7 +1601,9 @@ dependencies = [
  "bytes",
  "codex-core",
  "futures",
+ "pretty_assertions",
  "reqwest",
+ "semver",
  "serde_json",
  "tokio",
  "tracing",
@@ -1699,6 +1706,7 @@ dependencies = [
  "pretty_assertions",
  "reqwest",
  "rmcp",
+ "schemars 0.8.22",
  "serde",
  "serde_json",
  "serial_test",
@@ -2126,6 +2134,7 @@ dependencies = [
  "codex-protocol",
  "codex-utils-absolute-path",
  "codex-utils-cargo-bin",
+ "futures",
  "notify",
  "pretty_assertions",
  "regex-lite",
@@ -2134,6 +2143,7 @@ dependencies = [
  "shlex",
  "tempfile",
  "tokio",
+ "tokio-tungstenite",
  "walkdir",
  "wiremock",
 ]
@@ -2361,6 +2371,12 @@
  "syn 2.0.104",
 ]

+[[package]]
+name = "data-encoding"
+version = "2.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea"
+
 [[package]]
 name = "dbus"
 version = "0.9.9"
@@ -2763,6 +2779,12 @@
  "regex",
 ]

+[[package]]
+name = "env_home"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
+
 [[package]]
 name = "env_logger"
 version = "0.11.8"
@@ -2798,7 +2820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
 dependencies = [
  "libc",
- "windows-sys 0.60.2",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -2895,7 +2917,7 @@ checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78"
 dependencies = [
  "cfg-if",
  "rustix 1.0.8",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -3836,7 +3858,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
 dependencies = [
  "hermit-abi",
  "libc",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -5347,7 +5369,7 @@ dependencies = [
  "once_cell",
  "socket2 0.6.1",
  "tracing",
- "windows-sys 0.60.2",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -5726,7 +5748,7 @@ dependencies = [
  "errno",
  "libc",
  "linux-raw-sys 0.4.15",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -5739,7 +5761,7 @@ dependencies = [
  "errno",
  "libc",
  "linux-raw-sys 0.9.4",
- "windows-sys 0.60.2",
+ "windows-sys 0.52.0",
 ]

 [[package]]
@@ -7112,10 +7134,22 @@ dependencies = [
 ]

 [[package]]
-name = "tokio-util"
-version = "0.7.16"
+name = "tokio-tungstenite"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
+checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38"
 dependencies = [
+ "futures-util",
+ "log",
+ "tokio",
+ "tungstenite",
+]
+
+[[package]]
+name = "tokio-util"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098"
+dependencies = [
  "bytes",
  "futures-core",
@@ -7473,9 +7507,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"

 [[package]]
 name = "ts-rs"
-version = "11.0.1"
+version = "11.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be"
+checksum = "4994acea2522cd2b3b85c1d9529a55991e3ad5e25cdcd3de9d505972c4379424"
 dependencies = [
  "serde_json",
  "thiserror 2.0.17",
@@ -7485,9 +7519,9 @@ dependencies = [

 [[package]]
 name = "ts-rs-macros"
-version = "11.0.1"
+version = "11.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9d4ed7b4c18cc150a6a0a1e9ea1ecfa688791220781af6e119f9599a8502a0a"
+checksum = "ee6ff59666c9cbaec3533964505d39154dc4e0a56151fdea30a09ed0301f62e2"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -7497,14 +7531,33 @@ dependencies = [

 [[package]]
 name = "tui-scrollbar"
-version = "0.2.1"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c42613099915b2e30e9f144670666e858e2538366f77742e1cf1c2f230efcacd"
+checksum = "0e4267311b5c7999a996ea94939b6d2b1b44a9e5cc11e76cbbb6dcca4c281df4"
 dependencies = [
  "document-features",
  "ratatui-core",
 ]

+[[package]]
+name = "tungstenite"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http 1.3.1",
+ "httparse",
+ "log",
+ "rand 0.8.5",
+ "sha1",
+ "thiserror 1.0.69",
+ "url",
+ "utf-8",
+]
+
 [[package]]
 name = "typenum"
 version = "1.18.0"
@@ -7989,13 +8042,12 @@ checksum = "a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3"

 [[package]]
 name = "which"
-version = "6.0.3"
+version = "8.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f"
+checksum = "d3fabb953106c3c8eea8306e4393700d7657561cb43122571b172bbfb7c7ba1d"
 dependencies = [
- "either",
- "home",
- "rustix 0.38.44",
+ "env_home",
+ "rustix 1.0.8",
  "winsafe",
 ]
@@ -8027,7 +8079,7 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
 dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]

 [[package]]
codex-rs/Cargo.toml

@@ -142,6 +142,7 @@ icu_decimal = "2.1"
 icu_locale_core = "2.1"
 icu_provider = { version = "2.1", features = ["sync"] }
 ignore = "0.4.23"
+indoc = "2.0"
 image = { version = "^0.25.9", default-features = false }
 include_dir = "0.7.4"
 indexmap = "2.12.0"
@@ -192,6 +193,7 @@ serde_yaml = "0.9"
 serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10"
+semver = "1.0"
 shlex = "1.3.0"
 similar = "2.7.0"
 socket2 = "0.6.1"
@@ -209,7 +211,8 @@ tiny_http = "0.12"
 tokio = "1"
 tokio-stream = "0.1.18"
 tokio-test = "0.4"
-tokio-util = "0.7.16"
+tokio-tungstenite = "0.21.0"
+tokio-util = "0.7.18"
 toml = "0.9.5"
 toml_edit = "0.24.0"
 tracing = "0.1.43"
@@ -221,7 +224,7 @@ tree-sitter-bash = "0.25"
 zstd = "0.13"
 tree-sitter-highlight = "0.25.10"
 ts-rs = "11"
-tui-scrollbar = "0.2.1"
+tui-scrollbar = "0.2.2"
 uds_windows = "1.1.0"
 unicode-segmentation = "1.12.0"
 unicode-width = "0.2"
@@ -231,7 +234,7 @@ uuid = "1"
 vt100 = "0.16.2"
 walkdir = "2.5.0"
 webbrowser = "1.0"
-which = "6"
+which = "8"
 wildmatch = "2.6.1"

 wiremock = "0.6"
@@ -156,6 +156,11 @@ client_request_definitions! {
         response: v2::McpServerOauthLoginResponse,
     },

+    McpServerRefresh => "config/mcpServer/reload" {
+        params: #[ts(type = "undefined")] #[serde(skip_serializing_if = "Option::is_none")] Option<()>,
+        response: v2::McpServerRefreshResponse,
+    },
+
     McpServerStatusList => "mcpServerStatus/list" {
         params: v2::ListMcpServerStatusParams,
         response: v2::ListMcpServerStatusResponse,
@@ -940,6 +940,16 @@ pub struct ListMcpServerStatusResponse {
     pub next_cursor: Option<String>,
 }

+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct McpServerRefreshParams {}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
+#[serde(rename_all = "camelCase")]
+#[ts(export_to = "v2/")]
+pub struct McpServerRefreshResponse {}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
 #[serde(rename_all = "camelCase")]
 #[ts(export_to = "v2/")]
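Both of the new types are empty structs, so on the wire they are just the bare JSON object `{}`. A quick stand-alone sketch of that round trip (simplified: the derives below drop the `JsonSchema`/`TS` machinery from the real definitions):

```rust
use serde::{Deserialize, Serialize};

// Simplified stand-in for the protocol type above.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct McpServerRefreshResponse {}

fn main() {
    // An empty braced struct serializes to the empty JSON object...
    let json = serde_json::to_string(&McpServerRefreshResponse {}).unwrap();
    assert_eq!(json, "{}");

    // ...and deserializes back from `{}`.
    let back: McpServerRefreshResponse = serde_json::from_str("{}").unwrap();
    assert_eq!(back, McpServerRefreshResponse {});
}
```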
@@ -88,6 +88,7 @@ Example (from OpenAI's official VSCode extension):
 - `model/list` — list available models (with reasoning effort options).
 - `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).
 - `mcpServer/oauth/login` — start an OAuth login for a configured MCP server; returns an `authorization_url` and later emits `mcpServer/oauthLogin/completed` once the browser flow finishes.
+- `config/mcpServer/reload` — reload MCP server config from disk and queue a refresh for loaded threads (applied on each thread's next active turn); returns `{}`. Use this after editing `config.toml` without restarting the server (see the wire sketch after this list).
 - `mcpServerStatus/list` — enumerate configured MCP servers with their tools, resources, resource templates, and auth status; supports cursor+limit pagination.
 - `feedback/upload` — submit a feedback report (classification + optional reason/logs and conversation_id); returns the tracking thread id.
 - `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
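A minimal sketch of the `config/mcpServer/reload` exchange on the wire, assuming the JSON-RPC 2.0 framing the app server uses for its other methods; the request id here is illustrative:

```rust
use serde_json::json;

fn main() {
    // Client -> server. `params` is Option<()> and is omitted when None.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 7,
        "method": "config/mcpServer/reload"
    });

    // Server -> client. The result is the documented empty object; loaded
    // threads apply the reloaded config on their next active turn.
    let response = json!({
        "jsonrpc": "2.0",
        "id": 7,
        "result": {}
    });

    println!("{request}\n{response}");
}
```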
@@ -60,6 +60,7 @@ use codex_app_server_protocol::LogoutChatGptResponse;
 use codex_app_server_protocol::McpServerOauthLoginCompletedNotification;
 use codex_app_server_protocol::McpServerOauthLoginParams;
 use codex_app_server_protocol::McpServerOauthLoginResponse;
+use codex_app_server_protocol::McpServerRefreshResponse;
 use codex_app_server_protocol::McpServerStatus;
 use codex_app_server_protocol::ModelListParams;
 use codex_app_server_protocol::ModelListResponse;
@@ -157,6 +158,7 @@ use codex_protocol::items::TurnItem;
 use codex_protocol::models::ResponseItem;
 use codex_protocol::protocol::GitInfo as CoreGitInfo;
 use codex_protocol::protocol::McpAuthStatus as CoreMcpAuthStatus;
+use codex_protocol::protocol::McpServerRefreshConfig;
 use codex_protocol::protocol::RateLimitSnapshot as CoreRateLimitSnapshot;
 use codex_protocol::protocol::RolloutItem;
 use codex_protocol::protocol::SessionMetaLine;
@@ -425,6 +427,9 @@ impl CodexMessageProcessor {
             ClientRequest::McpServerOauthLogin { request_id, params } => {
                 self.mcp_server_oauth_login(request_id, params).await;
             }
+            ClientRequest::McpServerRefresh { request_id, params } => {
+                self.mcp_server_refresh(request_id, params).await;
+            }
             ClientRequest::McpServerStatusList { request_id, params } => {
                 self.list_mcp_server_status(request_id, params).await;
             }
@@ -2302,6 +2307,57 @@
         outgoing.send_response(request_id, response).await;
     }

+    async fn mcp_server_refresh(&self, request_id: RequestId, _params: Option<()>) {
+        let config = match self.load_latest_config().await {
+            Ok(config) => config,
+            Err(error) => {
+                self.outgoing.send_error(request_id, error).await;
+                return;
+            }
+        };
+
+        let mcp_servers = match serde_json::to_value(config.mcp_servers.get()) {
+            Ok(value) => value,
+            Err(err) => {
+                let error = JSONRPCErrorError {
+                    code: INTERNAL_ERROR_CODE,
+                    message: format!("failed to serialize MCP servers: {err}"),
+                    data: None,
+                };
+                self.outgoing.send_error(request_id, error).await;
+                return;
+            }
+        };
+
+        let mcp_oauth_credentials_store_mode =
+            match serde_json::to_value(config.mcp_oauth_credentials_store_mode) {
+                Ok(value) => value,
+                Err(err) => {
+                    let error = JSONRPCErrorError {
+                        code: INTERNAL_ERROR_CODE,
+                        message: format!(
+                            "failed to serialize MCP OAuth credentials store mode: {err}"
+                        ),
+                        data: None,
+                    };
+                    self.outgoing.send_error(request_id, error).await;
+                    return;
+                }
+            };
+
+        let refresh_config = McpServerRefreshConfig {
+            mcp_servers,
+            mcp_oauth_credentials_store_mode,
+        };
+
+        // Refresh requests are queued per thread; each thread rebuilds MCP connections on its next
+        // active turn to avoid work for threads that never resume.
+        let thread_manager = Arc::clone(&self.thread_manager);
+        thread_manager.refresh_mcp_servers(refresh_config).await;
+        let response = McpServerRefreshResponse {};
+        self.outgoing.send_response(request_id, response).await;
+    }
+
     async fn mcp_server_oauth_login(
         &self,
         request_id: RequestId,
@@ -2321,7 +2377,7 @@ impl CodexMessageProcessor {
|
||||
timeout_secs,
|
||||
} = params;
|
||||
|
||||
let Some(server) = config.mcp_servers.get(&name) else {
|
||||
let Some(server) = config.mcp_servers.get().get(&name) else {
|
||||
let error = JSONRPCErrorError {
|
||||
code: INVALID_REQUEST_ERROR_CODE,
|
||||
message: format!("No MCP server named '{name}' found."),
|
||||
@@ -2358,6 +2414,7 @@ impl CodexMessageProcessor {
|
||||
env_http_headers,
|
||||
scopes.as_deref().unwrap_or_default(),
|
||||
timeout_secs,
|
||||
config.mcp_oauth_callback_port,
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -135,6 +135,7 @@ mod tests {
|
||||
CoreSandboxModeRequirement::ReadOnly,
|
||||
CoreSandboxModeRequirement::ExternalSandbox,
|
||||
]),
|
||||
mcp_server_requirements: None,
|
||||
};
|
||||
|
||||
let mapped = map_requirements_toml_to_api(requirements);
|
||||
|
||||
@@ -44,6 +44,7 @@ pub async fn run_main(
|
||||
codex_linux_sandbox_exe: Option<PathBuf>,
|
||||
cli_config_overrides: CliConfigOverrides,
|
||||
loader_overrides: LoaderOverrides,
|
||||
default_analytics_enabled: bool,
|
||||
) -> IoResult<()> {
|
||||
// Set up channels.
|
||||
let (incoming_tx, mut incoming_rx) = mpsc::channel::<JSONRPCMessage>(CHANNEL_CAPACITY);
|
||||
@@ -96,7 +97,7 @@ pub async fn run_main(
|
||||
&config,
|
||||
env!("CARGO_PKG_VERSION"),
|
||||
Some("codex_app_server"),
|
||||
false,
|
||||
default_analytics_enabled,
|
||||
)
|
||||
.map_err(|e| {
|
||||
std::io::Error::new(
|
||||
|
||||
@@ -20,6 +20,7 @@ fn main() -> anyhow::Result<()> {
|
||||
codex_linux_sandbox_exe,
|
||||
CliConfigOverrides::default(),
|
||||
loader_overrides,
|
||||
false,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
|
||||
@@ -13,11 +13,15 @@ use codex_app_server_protocol::SendUserMessageParams;
|
||||
use codex_app_server_protocol::SendUserMessageResponse;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::models::ContentItem;
|
||||
use codex_protocol::models::DeveloperInstructions;
|
||||
use codex_protocol::models::ResponseItem;
|
||||
use codex_protocol::protocol::AskForApproval;
|
||||
use codex_protocol::protocol::RawResponseItemEvent;
|
||||
use codex_protocol::protocol::SandboxPolicy;
|
||||
use core_test_support::responses;
|
||||
use pretty_assertions::assert_eq;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
|
||||
@@ -194,6 +198,9 @@ async fn test_send_message_raw_notifications_opt_in() -> Result<()> {
|
||||
})
|
||||
.await?;
|
||||
|
||||
let permissions = read_raw_response_item(&mut mcp, conversation_id).await;
|
||||
assert_permissions_message(&permissions);
|
||||
|
||||
let developer = read_raw_response_item(&mut mcp, conversation_id).await;
|
||||
assert_developer_message(&developer, "Use the test harness tools.");
|
||||
|
||||
@@ -340,6 +347,27 @@ fn assert_instructions_message(item: &ResponseItem) {
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_permissions_message(item: &ResponseItem) {
|
||||
match item {
|
||||
ResponseItem::Message { role, content, .. } => {
|
||||
assert_eq!(role, "developer");
|
||||
let texts = content_texts(content);
|
||||
let expected = DeveloperInstructions::from_policy(
|
||||
&SandboxPolicy::DangerFullAccess,
|
||||
AskForApproval::Never,
|
||||
&PathBuf::from("/tmp"),
|
||||
)
|
||||
.into_text();
|
||||
assert_eq!(
|
||||
texts,
|
||||
vec![expected.as_str()],
|
||||
"expected permissions developer message, got {texts:?}"
|
||||
);
|
||||
}
|
||||
other => panic!("expected permissions message, got {other:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_developer_message(item: &ResponseItem, expected_text: &str) {
|
||||
match item {
|
||||
ResponseItem::Message { role, content, .. } => {
|
||||
|
||||
66
codex-rs/app-server/tests/suite/v2/analytics.rs
Normal file
@@ -0,0 +1,66 @@
use anyhow::Result;
use codex_core::config::ConfigBuilder;
use codex_core::config::types::OtelExporterKind;
use codex_core::config::types::OtelHttpProtocol;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use tempfile::TempDir;

const SERVICE_VERSION: &str = "0.0.0-test";

fn set_metrics_exporter(config: &mut codex_core::config::Config) {
    config.otel.metrics_exporter = OtelExporterKind::OtlpHttp {
        endpoint: "http://localhost:4318".to_string(),
        headers: HashMap::new(),
        protocol: OtelHttpProtocol::Json,
        tls: None,
    };
}

#[tokio::test]
async fn app_server_default_analytics_disabled_without_flag() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mut config = ConfigBuilder::default()
        .codex_home(codex_home.path().to_path_buf())
        .build()
        .await?;
    set_metrics_exporter(&mut config);
    config.analytics_enabled = None;

    let provider = codex_core::otel_init::build_provider(
        &config,
        SERVICE_VERSION,
        Some("codex_app_server"),
        false,
    )
    .map_err(|err| anyhow::anyhow!(err.to_string()))?;

    // With analytics unset in the config and the default flag set to false, metrics are disabled.
    // No provider is built.
    assert_eq!(provider.is_none(), true);
    Ok(())
}

#[tokio::test]
async fn app_server_default_analytics_enabled_with_flag() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mut config = ConfigBuilder::default()
        .codex_home(codex_home.path().to_path_buf())
        .build()
        .await?;
    set_metrics_exporter(&mut config);
    config.analytics_enabled = None;

    let provider = codex_core::otel_init::build_provider(
        &config,
        SERVICE_VERSION,
        Some("codex_app_server"),
        true,
    )
    .map_err(|err| anyhow::anyhow!(err.to_string()))?;

    // With analytics unset in the config and the default flag set to true, metrics are enabled.
    let has_metrics = provider.as_ref().and_then(|otel| otel.metrics()).is_some();
    assert_eq!(has_metrics, true);
    Ok(())
}
@@ -1,4 +1,5 @@
mod account;
mod analytics;
mod config_rpc;
mod initialize;
mod model_list;
@@ -450,13 +450,10 @@ fn test_parse_patch() {

    assert_eq!(
        parse_patch_text(
            concat!(
                "*** Begin Patch",
                " ",
                "\n*** Add File: foo\n+hi\n",
                " ",
                "*** End Patch"
            ),
            r#"*** Begin Patch
*** Add File: foo
+hi
*** End Patch"#,
            ParseMode::Strict
        )
        .unwrap()
@@ -468,9 +465,9 @@ fn test_parse_patch() {
    );
    assert_eq!(
        parse_patch_text(
            "*** Begin Patch\n\
             *** Update File: test.py\n\
             *** End Patch",
            r#"*** Begin Patch
*** Update File: test.py
*** End Patch"#,
            ParseMode::Strict
        ),
        Err(InvalidHunkError {
@@ -480,8 +477,8 @@ fn test_parse_patch() {
    );
    assert_eq!(
        parse_patch_text(
            "*** Begin Patch\n\
             *** End Patch",
            r#"*** Begin Patch
*** End Patch"#,
            ParseMode::Strict
        )
        .unwrap()
@@ -490,17 +487,17 @@ fn test_parse_patch() {
    );
    assert_eq!(
        parse_patch_text(
            "*** Begin Patch\n\
             *** Add File: path/add.py\n\
             +abc\n\
             +def\n\
             *** Delete File: path/delete.py\n\
             *** Update File: path/update.py\n\
             *** Move to: path/update2.py\n\
             @@ def f():\n\
             - pass\n\
             + return 123\n\
             *** End Patch",
            r#"*** Begin Patch
*** Add File: path/add.py
+abc
+def
*** Delete File: path/delete.py
*** Update File: path/update.py
*** Move to: path/update2.py
@@ def f():
- pass
+ return 123
*** End Patch"#,
            ParseMode::Strict
        )
        .unwrap()
@@ -528,13 +525,13 @@ fn test_parse_patch() {
    // Update hunk followed by another hunk (Add File).
    assert_eq!(
        parse_patch_text(
            "*** Begin Patch\n\
             *** Update File: file.py\n\
             @@\n\
             +line\n\
             *** Add File: other.py\n\
             +content\n\
             *** End Patch",
            r#"*** Begin Patch
*** Update File: file.py
@@
+line
*** Add File: other.py
+content
*** End Patch"#,
            ParseMode::Strict
        )
        .unwrap()
@@ -26,6 +26,7 @@ use codex_execpolicy::ExecPolicyCheckCommand;
use codex_responses_api_proxy::Args as ResponsesApiProxyArgs;
use codex_tui::AppExitInfo;
use codex_tui::Cli as TuiCli;
use codex_tui::ExitReason;
use codex_tui::update_action::UpdateAction;
use codex_tui2 as tui2;
use owo_colors::OwoColorize;
@@ -119,6 +120,9 @@ enum Subcommand {
    /// Resume a previous interactive session (picker by default; use --last to continue the most recent).
    Resume(ResumeCommand),

    /// Fork a previous interactive session (picker by default; use --last to fork the most recent).
    Fork(ForkCommand),

    /// [EXPERIMENTAL] Browse tasks from Codex Cloud and apply changes locally.
    #[clap(name = "cloud", alias = "cloud-tasks")]
    Cloud(CloudTasksCli),
@@ -161,6 +165,25 @@ struct ResumeCommand {
    config_overrides: TuiCli,
}

#[derive(Debug, Parser)]
struct ForkCommand {
    /// Conversation/session id (UUID). When provided, forks this session.
    /// If omitted, use --last to pick the most recent recorded session.
    #[arg(value_name = "SESSION_ID")]
    session_id: Option<String>,

    /// Fork the most recent session without showing the picker.
    #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")]
    last: bool,

    /// Show all sessions (disables cwd filtering and shows CWD column).
    #[arg(long = "all", default_value_t = false)]
    all: bool,

    #[clap(flatten)]
    config_overrides: TuiCli,
}

#[derive(Debug, Parser)]
struct SandboxArgs {
    #[command(subcommand)]
@@ -246,6 +269,24 @@ struct AppServerCommand {
    /// Omit to run the app server; specify a subcommand for tooling.
    #[command(subcommand)]
    subcommand: Option<AppServerSubcommand>,

    /// Controls whether analytics are enabled by default.
    ///
    /// Analytics are disabled by default for app-server. Users have to explicitly opt in
    /// via the `analytics` section in the config.toml file.
    ///
    /// However, for first-party use cases like the VSCode IDE extension, we default analytics
    /// to be enabled by default by setting this flag. Users can still opt out by setting this
    /// in their config.toml:
    ///
    /// ```toml
    /// [analytics]
    /// enabled = false
    /// ```
    ///
    /// See https://developers.openai.com/codex/config-advanced/#metrics for more details.
    #[arg(long = "analytics-default-enabled")]
    analytics_default_enabled: bool,
}

#[derive(Debug, clap::Subcommand)]
@@ -313,6 +354,14 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<Stri

/// Handle the app exit and print the results. Optionally run the update action.
fn handle_app_exit(exit_info: AppExitInfo) -> anyhow::Result<()> {
    match exit_info.exit_reason {
        ExitReason::Fatal(message) => {
            eprintln!("ERROR: {message}");
            std::process::exit(1);
        }
        ExitReason::UserRequested => { /* normal exit */ }
    }

    let update_action = exit_info.update_action;
    let color_enabled = supports_color::on(Stream::Stdout).is_some();
    for line in format_exit_messages(exit_info, color_enabled) {
@@ -478,6 +527,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
                codex_linux_sandbox_exe,
                root_config_overrides,
                codex_core::config_loader::LoaderOverrides::default(),
                app_server_cli.analytics_default_enabled,
            )
            .await?;
        }
@@ -508,6 +558,23 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
            let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?;
            handle_app_exit(exit_info)?;
        }
        Some(Subcommand::Fork(ForkCommand {
            session_id,
            last,
            all,
            config_overrides,
        })) => {
            interactive = finalize_fork_interactive(
                interactive,
                root_config_overrides.clone(),
                session_id,
                last,
                all,
                config_overrides,
            );
            let exit_info = run_interactive_tui(interactive, codex_linux_sandbox_exe).await?;
            handle_app_exit(exit_info)?;
        }
        Some(Subcommand::Login(mut login_cli)) => {
            prepend_config_flags(
                &mut login_cli.config_overrides,
@@ -725,7 +792,7 @@ fn finalize_resume_interactive(
    interactive.resume_show_all = show_all;

    // Merge resume-scoped flags and overrides with highest precedence.
    merge_resume_cli_flags(&mut interactive, resume_cli);
    merge_interactive_cli_flags(&mut interactive, resume_cli);

    // Propagate any root-level config overrides (e.g. `-c key=value`).
    prepend_config_flags(&mut interactive.config_overrides, root_config_overrides);
@@ -733,51 +800,77 @@ fn finalize_resume_interactive(
    interactive
}

/// Merge flags provided to `codex resume` so they take precedence over any
/// root-level flags. Only overrides fields explicitly set on the resume-scoped
/// Build the final `TuiCli` for a `codex fork` invocation.
fn finalize_fork_interactive(
    mut interactive: TuiCli,
    root_config_overrides: CliConfigOverrides,
    session_id: Option<String>,
    last: bool,
    show_all: bool,
    fork_cli: TuiCli,
) -> TuiCli {
    // Start with the parsed interactive CLI so fork shares the same
    // configuration surface area as `codex` without additional flags.
    let fork_session_id = session_id;
    interactive.fork_picker = fork_session_id.is_none() && !last;
    interactive.fork_last = last;
    interactive.fork_session_id = fork_session_id;
    interactive.fork_show_all = show_all;

    // Merge fork-scoped flags and overrides with highest precedence.
    merge_interactive_cli_flags(&mut interactive, fork_cli);

    // Propagate any root-level config overrides (e.g. `-c key=value`).
    prepend_config_flags(&mut interactive.config_overrides, root_config_overrides);

    interactive
}

/// Merge flags provided to `codex resume`/`codex fork` so they take precedence over any
/// root-level flags. Only overrides fields explicitly set on the subcommand-scoped
/// CLI. Also appends `-c key=value` overrides with highest precedence.
fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) {
    if let Some(model) = resume_cli.model {
fn merge_interactive_cli_flags(interactive: &mut TuiCli, subcommand_cli: TuiCli) {
    if let Some(model) = subcommand_cli.model {
        interactive.model = Some(model);
    }
    if resume_cli.oss {
    if subcommand_cli.oss {
        interactive.oss = true;
    }
    if let Some(profile) = resume_cli.config_profile {
    if let Some(profile) = subcommand_cli.config_profile {
        interactive.config_profile = Some(profile);
    }
    if let Some(sandbox) = resume_cli.sandbox_mode {
    if let Some(sandbox) = subcommand_cli.sandbox_mode {
        interactive.sandbox_mode = Some(sandbox);
    }
    if let Some(approval) = resume_cli.approval_policy {
    if let Some(approval) = subcommand_cli.approval_policy {
        interactive.approval_policy = Some(approval);
    }
    if resume_cli.full_auto {
    if subcommand_cli.full_auto {
        interactive.full_auto = true;
    }
    if resume_cli.dangerously_bypass_approvals_and_sandbox {
    if subcommand_cli.dangerously_bypass_approvals_and_sandbox {
        interactive.dangerously_bypass_approvals_and_sandbox = true;
    }
    if let Some(cwd) = resume_cli.cwd {
    if let Some(cwd) = subcommand_cli.cwd {
        interactive.cwd = Some(cwd);
    }
    if resume_cli.web_search {
    if subcommand_cli.web_search {
        interactive.web_search = true;
    }
    if !resume_cli.images.is_empty() {
        interactive.images = resume_cli.images;
    if !subcommand_cli.images.is_empty() {
        interactive.images = subcommand_cli.images;
    }
    if !resume_cli.add_dir.is_empty() {
        interactive.add_dir.extend(resume_cli.add_dir);
    if !subcommand_cli.add_dir.is_empty() {
        interactive.add_dir.extend(subcommand_cli.add_dir);
    }
    if let Some(prompt) = resume_cli.prompt {
    if let Some(prompt) = subcommand_cli.prompt {
        interactive.prompt = Some(prompt);
    }

    interactive
        .config_overrides
        .raw_overrides
        .extend(resume_cli.config_overrides.raw_overrides);
        .extend(subcommand_cli.config_overrides.raw_overrides);
}

fn print_completion(cmd: CompletionCommand) {
@@ -794,7 +887,7 @@ mod tests {
    use codex_protocol::ThreadId;
    use pretty_assertions::assert_eq;

    fn finalize_from_args(args: &[&str]) -> TuiCli {
    fn finalize_resume_from_args(args: &[&str]) -> TuiCli {
        let cli = MultitoolCli::try_parse_from(args).expect("parse");
        let MultitoolCli {
            interactive,
@@ -823,6 +916,36 @@ mod tests {
        )
    }

    fn finalize_fork_from_args(args: &[&str]) -> TuiCli {
        let cli = MultitoolCli::try_parse_from(args).expect("parse");
        let MultitoolCli {
            interactive,
            config_overrides: root_overrides,
            subcommand,
            feature_toggles: _,
        } = cli;

        let Subcommand::Fork(ForkCommand {
            session_id,
            last,
            all,
            config_overrides: fork_cli,
        }) = subcommand.expect("fork present")
        else {
            unreachable!()
        };

        finalize_fork_interactive(interactive, root_overrides, session_id, last, all, fork_cli)
    }

    fn app_server_from_args(args: &[&str]) -> AppServerCommand {
        let cli = MultitoolCli::try_parse_from(args).expect("parse");
        let Subcommand::AppServer(app_server) = cli.subcommand.expect("app-server present") else {
            unreachable!()
        };
        app_server
    }

    fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo {
        let token_usage = TokenUsage {
            output_tokens: 2,
@@ -833,6 +956,7 @@ mod tests {
            token_usage,
            thread_id: conversation.map(ThreadId::from_string).map(Result::unwrap),
            update_action: None,
            exit_reason: ExitReason::UserRequested,
        }
    }

@@ -842,6 +966,7 @@ mod tests {
            token_usage: TokenUsage::default(),
            thread_id: None,
            update_action: None,
            exit_reason: ExitReason::UserRequested,
        };
        let lines = format_exit_messages(exit_info, false);
        assert!(lines.is_empty());
@@ -871,7 +996,8 @@ mod tests {

    #[test]
    fn resume_model_flag_applies_when_no_root_flags() {
        let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5.1-test"].as_ref());
        let interactive =
            finalize_resume_from_args(["codex", "resume", "-m", "gpt-5.1-test"].as_ref());

        assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test"));
        assert!(interactive.resume_picker);
@@ -881,7 +1007,7 @@ mod tests {

    #[test]
    fn resume_picker_logic_none_and_not_last() {
        let interactive = finalize_from_args(["codex", "resume"].as_ref());
        let interactive = finalize_resume_from_args(["codex", "resume"].as_ref());
        assert!(interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
@@ -890,7 +1016,7 @@ mod tests {

    #[test]
    fn resume_picker_logic_last() {
        let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref());
        let interactive = finalize_resume_from_args(["codex", "resume", "--last"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(interactive.resume_last);
        assert_eq!(interactive.resume_session_id, None);
@@ -899,7 +1025,7 @@ mod tests {

    #[test]
    fn resume_picker_logic_with_session_id() {
        let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref());
        let interactive = finalize_resume_from_args(["codex", "resume", "1234"].as_ref());
        assert!(!interactive.resume_picker);
        assert!(!interactive.resume_last);
        assert_eq!(interactive.resume_session_id.as_deref(), Some("1234"));
@@ -908,14 +1034,14 @@ mod tests {

    #[test]
    fn resume_all_flag_sets_show_all() {
        let interactive = finalize_from_args(["codex", "resume", "--all"].as_ref());
        let interactive = finalize_resume_from_args(["codex", "resume", "--all"].as_ref());
        assert!(interactive.resume_picker);
        assert!(interactive.resume_show_all);
    }

    #[test]
    fn resume_merges_option_flags_and_full_auto() {
        let interactive = finalize_from_args(
        let interactive = finalize_resume_from_args(
            [
                "codex",
                "resume",
@@ -972,7 +1098,7 @@ mod tests {

    #[test]
    fn resume_merges_dangerously_bypass_flag() {
        let interactive = finalize_from_args(
        let interactive = finalize_resume_from_args(
            [
                "codex",
                "resume",
@@ -986,6 +1112,53 @@ mod tests {
        assert_eq!(interactive.resume_session_id, None);
    }

    #[test]
    fn fork_picker_logic_none_and_not_last() {
        let interactive = finalize_fork_from_args(["codex", "fork"].as_ref());
        assert!(interactive.fork_picker);
        assert!(!interactive.fork_last);
        assert_eq!(interactive.fork_session_id, None);
        assert!(!interactive.fork_show_all);
    }

    #[test]
    fn fork_picker_logic_last() {
        let interactive = finalize_fork_from_args(["codex", "fork", "--last"].as_ref());
        assert!(!interactive.fork_picker);
        assert!(interactive.fork_last);
        assert_eq!(interactive.fork_session_id, None);
        assert!(!interactive.fork_show_all);
    }

    #[test]
    fn fork_picker_logic_with_session_id() {
        let interactive = finalize_fork_from_args(["codex", "fork", "1234"].as_ref());
        assert!(!interactive.fork_picker);
        assert!(!interactive.fork_last);
        assert_eq!(interactive.fork_session_id.as_deref(), Some("1234"));
        assert!(!interactive.fork_show_all);
    }

    #[test]
    fn fork_all_flag_sets_show_all() {
        let interactive = finalize_fork_from_args(["codex", "fork", "--all"].as_ref());
        assert!(interactive.fork_picker);
        assert!(interactive.fork_show_all);
    }

    #[test]
    fn app_server_analytics_default_disabled_without_flag() {
        let app_server = app_server_from_args(["codex", "app-server"].as_ref());
        assert!(!app_server.analytics_default_enabled);
    }

    #[test]
    fn app_server_analytics_default_enabled_with_flag() {
        let app_server =
            app_server_from_args(["codex", "app-server", "--analytics-default-enabled"].as_ref());
        assert!(app_server.analytics_default_enabled);
    }

    #[test]
    fn feature_toggles_known_features_generate_overrides() {
        let toggles = FeatureToggles {
@@ -274,6 +274,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re
        http_headers.clone(),
        env_http_headers.clone(),
        &Vec::new(),
        config.mcp_oauth_callback_port,
    )
    .await?;
    println!("Successfully logged in.");
@@ -331,7 +332,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)

    let LoginArgs { name, scopes } = login_args;

    let Some(server) = config.mcp_servers.get(&name) else {
    let Some(server) = config.mcp_servers.get().get(&name) else {
        bail!("No MCP server named '{name}' found.");
    };

@@ -352,6 +353,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs)
        http_headers,
        env_http_headers,
        &scopes,
        config.mcp_oauth_callback_port,
    )
    .await?;
    println!("Successfully logged in to MCP server '{name}'.");
@@ -370,6 +372,7 @@ async fn run_logout(config_overrides: &CliConfigOverrides, logout_args: LogoutAr

    let server = config
        .mcp_servers
        .get()
        .get(&name)
        .ok_or_else(|| anyhow!("No MCP server named '{name}' found in configuration."))?;

@@ -652,7 +655,7 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re
        .await
        .context("failed to load configuration")?;

    let Some(server) = config.mcp_servers.get(&get_args.name) else {
    let Some(server) = config.mcp_servers.get().get(&get_args.name) else {
        bail!("No MCP server named '{name}' found.", name = get_args.name);
    };

@@ -14,11 +14,13 @@ http = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "sync", "time"] }
tokio = { workspace = true, features = ["macros", "net", "rt", "sync", "time"] }
tokio-tungstenite = { workspace = true }
tracing = { workspace = true }
eventsource-stream = { workspace = true }
regex-lite = { workspace = true }
tokio-util = { workspace = true, features = ["codec"] }
url = { workspace = true }

[dev-dependencies]
anyhow = { workspace = true }
@@ -136,6 +136,38 @@ pub struct ResponsesApiRequest<'a> {
    pub text: Option<TextControls>,
}

#[derive(Debug, Serialize)]
pub struct ResponseCreateWsRequest {
    pub model: String,
    pub instructions: String,
    pub input: Vec<ResponseItem>,
    pub tools: Vec<Value>,
    pub tool_choice: String,
    pub parallel_tool_calls: bool,
    pub reasoning: Option<Reasoning>,
    pub store: bool,
    pub stream: bool,
    pub include: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextControls>,
}

#[derive(Debug, Serialize)]
pub struct ResponseAppendWsRequest {
    pub input: Vec<ResponseItem>,
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
#[allow(clippy::large_enum_variant)]
pub enum ResponsesWsRequest {
    #[serde(rename = "response.create")]
    ResponseCreate(ResponseCreateWsRequest),
    #[serde(rename = "response.append")]
    ResponseAppend(ResponseAppendWsRequest),
}

pub fn create_text_param_for_request(
    verbosity: Option<VerbosityConfig>,
    output_schema: &Option<Value>,
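Because `ResponsesWsRequest` is internally tagged with `#[serde(tag = "type")]` and its variants are renamed, the inner struct's fields are flattened next to the tag on the wire. A self-contained sketch of the same serde pattern, using a simplified stand-in for `ResponseItem`:

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(tag = "type")]
enum WsRequest {
    #[serde(rename = "response.append")]
    ResponseAppend(Append),
}

#[derive(Serialize)]
struct Append {
    // Simplified stand-in for `Vec<ResponseItem>`.
    input: Vec<String>,
}

fn main() {
    let msg = WsRequest::ResponseAppend(Append {
        input: vec!["item".to_string()],
    });
    // Prints: {"type":"response.append","input":["item"]}
    println!("{}", serde_json::to_string(&msg).expect("serialize"));
}
```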
@@ -2,4 +2,5 @@ pub mod chat;
pub mod compact;
pub mod models;
pub mod responses;
pub mod responses_websocket;
mod streaming;
253
codex-rs/codex-api/src/endpoint/responses_websocket.rs
Normal file
@@ -0,0 +1,253 @@
use crate::auth::AuthProvider;
use crate::common::ResponseEvent;
use crate::common::ResponseStream;
use crate::common::ResponsesWsRequest;
use crate::error::ApiError;
use crate::provider::Provider;
use crate::sse::responses::ResponsesStreamEvent;
use crate::sse::responses::process_responses_event;
use codex_client::TransportError;
use futures::SinkExt;
use futures::StreamExt;
use http::HeaderMap;
use http::HeaderValue;
use serde_json::Value;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use tokio_tungstenite::MaybeTlsStream;
use tokio_tungstenite::WebSocketStream;
use tokio_tungstenite::tungstenite::Error as WsError;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
use tracing::debug;
use tracing::trace;
use url::Url;

type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;

pub struct ResponsesWebsocketConnection {
    stream: Arc<Mutex<Option<WsStream>>>,
    // TODO (pakrym): is this the right place for timeout?
    idle_timeout: Duration,
}

impl ResponsesWebsocketConnection {
    fn new(stream: WsStream, idle_timeout: Duration) -> Self {
        Self {
            stream: Arc::new(Mutex::new(Some(stream))),
            idle_timeout,
        }
    }

    pub async fn is_closed(&self) -> bool {
        self.stream.lock().await.is_none()
    }

    pub async fn stream_request(
        &self,
        request: ResponsesWsRequest,
    ) -> Result<ResponseStream, ApiError> {
        let (tx_event, rx_event) =
            mpsc::channel::<std::result::Result<ResponseEvent, ApiError>>(1600);
        let stream = Arc::clone(&self.stream);
        let idle_timeout = self.idle_timeout;
        let request_body = serde_json::to_value(&request).map_err(|err| {
            ApiError::Stream(format!("failed to encode websocket request: {err}"))
        })?;

        tokio::spawn(async move {
            let mut guard = stream.lock().await;
            let Some(ws_stream) = guard.as_mut() else {
                let _ = tx_event
                    .send(Err(ApiError::Stream(
                        "websocket connection is closed".to_string(),
                    )))
                    .await;
                return;
            };

            if let Err(err) = run_websocket_response_stream(
                ws_stream,
                tx_event.clone(),
                request_body,
                idle_timeout,
            )
            .await
            {
                let _ = ws_stream.close(None).await;
                *guard = None;
                let _ = tx_event.send(Err(err)).await;
            }
        });

        Ok(ResponseStream { rx_event })
    }
}

pub struct ResponsesWebsocketClient<A: AuthProvider> {
    provider: Provider,
    auth: A,
}

impl<A: AuthProvider> ResponsesWebsocketClient<A> {
    pub fn new(provider: Provider, auth: A) -> Self {
        Self { provider, auth }
    }

    pub async fn connect(
        &self,
        extra_headers: HeaderMap,
    ) -> Result<ResponsesWebsocketConnection, ApiError> {
        let ws_url = Url::parse(&self.provider.url_for_path("responses"))
            .map_err(|err| ApiError::Stream(format!("failed to build websocket URL: {err}")))?;

        let mut headers = self.provider.headers.clone();
        headers.extend(extra_headers);
        apply_auth_headers(&mut headers, &self.auth);

        let stream = connect_websocket(ws_url, headers).await?;
        Ok(ResponsesWebsocketConnection::new(
            stream,
            self.provider.stream_idle_timeout,
        ))
    }
}
// TODO (pakrym): share with /auth
fn apply_auth_headers(headers: &mut HeaderMap, auth: &impl AuthProvider) {
    if let Some(token) = auth.bearer_token()
        && let Ok(header) = HeaderValue::from_str(&format!("Bearer {token}"))
    {
        let _ = headers.insert(http::header::AUTHORIZATION, header);
    }
    if let Some(account_id) = auth.account_id()
        && let Ok(header) = HeaderValue::from_str(&account_id)
    {
        let _ = headers.insert("ChatGPT-Account-ID", header);
    }
}

async fn connect_websocket(url: Url, headers: HeaderMap) -> Result<WsStream, ApiError> {
    let mut request = url
        .clone()
        .into_client_request()
        .map_err(|err| ApiError::Stream(format!("failed to build websocket request: {err}")))?;
    request.headers_mut().extend(headers);

    let (stream, _) = tokio_tungstenite::connect_async(request)
        .await
        .map_err(|err| map_ws_error(err, &url))?;
    Ok(stream)
}

fn map_ws_error(err: WsError, url: &Url) -> ApiError {
    match err {
        WsError::Http(response) => {
            let status = response.status();
            let headers = response.headers().clone();
            let body = response
                .body()
                .as_ref()
                .and_then(|bytes| String::from_utf8(bytes.clone()).ok());
            ApiError::Transport(TransportError::Http {
                status,
                url: Some(url.to_string()),
                headers: Some(headers),
                body,
            })
        }
        WsError::ConnectionClosed | WsError::AlreadyClosed => {
            ApiError::Stream("websocket closed".to_string())
        }
        WsError::Io(err) => ApiError::Transport(TransportError::Network(err.to_string())),
        other => ApiError::Transport(TransportError::Network(other.to_string())),
    }
}

async fn run_websocket_response_stream(
    ws_stream: &mut WsStream,
    tx_event: mpsc::Sender<std::result::Result<ResponseEvent, ApiError>>,
    request_body: Value,
    idle_timeout: Duration,
) -> Result<(), ApiError> {
    let request_text = match serde_json::to_string(&request_body) {
        Ok(text) => text,
        Err(err) => {
            return Err(ApiError::Stream(format!(
                "failed to encode websocket request: {err}"
            )));
        }
    };

    if let Err(err) = ws_stream.send(Message::Text(request_text)).await {
        return Err(ApiError::Stream(format!(
            "failed to send websocket request: {err}"
        )));
    }

    loop {
        let response = tokio::time::timeout(idle_timeout, ws_stream.next())
            .await
            .map_err(|_| ApiError::Stream("idle timeout waiting for websocket".into()));
        let message = match response {
            Ok(Some(Ok(msg))) => msg,
            Ok(Some(Err(err))) => {
                return Err(ApiError::Stream(err.to_string()));
            }
            Ok(None) => {
                return Err(ApiError::Stream(
                    "stream closed before response.completed".into(),
                ));
            }
            Err(err) => {
                return Err(err);
            }
        };

        match message {
            Message::Text(text) => {
                trace!("websocket event: {text}");
                let event = match serde_json::from_str::<ResponsesStreamEvent>(&text) {
                    Ok(event) => event,
                    Err(err) => {
                        debug!("failed to parse websocket event: {err}, data: {text}");
                        continue;
                    }
                };
                match process_responses_event(event) {
                    Ok(Some(event)) => {
                        let is_completed = matches!(event, ResponseEvent::Completed { .. });
                        let _ = tx_event.send(Ok(event)).await;
                        if is_completed {
                            break;
                        }
                    }
                    Ok(None) => {}
                    Err(error) => {
                        return Err(error.into_api_error());
                    }
                }
            }
            Message::Binary(_) => {
                return Err(ApiError::Stream("unexpected binary websocket event".into()));
            }
            Message::Ping(payload) => {
                if ws_stream.send(Message::Pong(payload)).await.is_err() {
                    return Err(ApiError::Stream("websocket ping failed".into()));
                }
            }
            Message::Pong(_) => {}
            Message::Close(_) => {
                return Err(ApiError::Stream(
                    "websocket closed before response.completed".into(),
                ));
            }
            _ => {}
        }
    }

    Ok(())
}
@@ -8,6 +8,7 @@ pub mod requests;
pub mod sse;
pub mod telemetry;

pub use crate::requests::headers::build_conversation_headers;
pub use codex_client::RequestTelemetry;
pub use codex_client::ReqwestTransport;
pub use codex_client::TransportError;
@@ -15,6 +16,8 @@ pub use codex_client::TransportError;
pub use crate::auth::AuthProvider;
pub use crate::common::CompactionInput;
pub use crate::common::Prompt;
pub use crate::common::ResponseAppendWsRequest;
pub use crate::common::ResponseCreateWsRequest;
pub use crate::common::ResponseEvent;
pub use crate::common::ResponseStream;
pub use crate::common::ResponsesApiRequest;
@@ -25,6 +28,8 @@ pub use crate::endpoint::compact::CompactClient;
pub use crate::endpoint::models::ModelsClient;
pub use crate::endpoint::responses::ResponsesClient;
pub use crate::endpoint::responses::ResponsesOptions;
pub use crate::endpoint::responses_websocket::ResponsesWebsocketClient;
pub use crate::endpoint::responses_websocket::ResponsesWebsocketConnection;
pub use crate::error::ApiError;
pub use crate::provider::Provider;
pub use crate::provider::WireApi;
@@ -393,10 +393,6 @@ mod tests {
        .build(&provider())
        .expect("request");

        assert_eq!(
            req.headers.get("conversation_id"),
            Some(&HeaderValue::from_static("conv-1"))
        );
        assert_eq!(
            req.headers.get("session_id"),
            Some(&HeaderValue::from_static("conv-1"))

@@ -2,10 +2,9 @@ use codex_protocol::protocol::SessionSource;
use http::HeaderMap;
use http::HeaderValue;

pub(crate) fn build_conversation_headers(conversation_id: Option<String>) -> HeaderMap {
pub fn build_conversation_headers(conversation_id: Option<String>) -> HeaderMap {
    let mut headers = HeaderMap::new();
    if let Some(id) = conversation_id {
        insert_header(&mut headers, "conversation_id", &id);
        insert_header(&mut headers, "session_id", &id);
    }
    headers

@@ -249,10 +249,6 @@ mod tests {
        .collect();
    assert_eq!(ids, vec![Some("m1".to_string()), None]);

    assert_eq!(
        request.headers.get("conversation_id"),
        Some(&HeaderValue::from_static("conv-1"))
    );
    assert_eq!(
        request.headers.get("session_id"),
        Some(&HeaderValue::from_static("conv-1"))
@@ -88,6 +88,14 @@ struct ResponseCompleted {
    usage: Option<ResponseCompletedUsage>,
}

#[derive(Debug, Deserialize)]
struct ResponseDone {
    #[serde(default)]
    id: Option<String>,
    #[serde(default)]
    usage: Option<ResponseCompletedUsage>,
}

#[derive(Debug, Deserialize)]
struct ResponseCompletedUsage {
    input_tokens: i64,
@@ -126,7 +134,7 @@ struct ResponseCompletedOutputTokensDetails {
}

#[derive(Deserialize, Debug)]
struct SseEvent {
pub struct ResponsesStreamEvent {
    #[serde(rename = "type")]
    kind: String,
    response: Option<Value>,
@@ -136,6 +144,145 @@ struct SseEvent {
    content_index: Option<i64>,
}

#[derive(Debug)]
pub enum ResponsesEventError {
    Api(ApiError),
}

impl ResponsesEventError {
    pub fn into_api_error(self) -> ApiError {
        match self {
            Self::Api(error) => error,
        }
    }
}

pub fn process_responses_event(
    event: ResponsesStreamEvent,
) -> std::result::Result<Option<ResponseEvent>, ResponsesEventError> {
    match event.kind.as_str() {
        "response.output_item.done" => {
            if let Some(item_val) = event.item {
                if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
                    return Ok(Some(ResponseEvent::OutputItemDone(item)));
                }
                debug!("failed to parse ResponseItem from output_item.done");
            }
        }
        "response.output_text.delta" => {
            if let Some(delta) = event.delta {
                return Ok(Some(ResponseEvent::OutputTextDelta(delta)));
            }
        }
        "response.reasoning_summary_text.delta" => {
            if let (Some(delta), Some(summary_index)) = (event.delta, event.summary_index) {
                return Ok(Some(ResponseEvent::ReasoningSummaryDelta {
                    delta,
                    summary_index,
                }));
            }
        }
        "response.reasoning_text.delta" => {
            if let (Some(delta), Some(content_index)) = (event.delta, event.content_index) {
                return Ok(Some(ResponseEvent::ReasoningContentDelta {
                    delta,
                    content_index,
                }));
            }
        }
        "response.created" => {
            if event.response.is_some() {
                return Ok(Some(ResponseEvent::Created {}));
            }
        }
        "response.failed" => {
            if let Some(resp_val) = event.response {
                let mut response_error = ApiError::Stream("response.failed event received".into());
                if let Some(error) = resp_val.get("error")
                    && let Ok(error) = serde_json::from_value::<Error>(error.clone())
                {
                    if is_context_window_error(&error) {
                        response_error = ApiError::ContextWindowExceeded;
                    } else if is_quota_exceeded_error(&error) {
                        response_error = ApiError::QuotaExceeded;
                    } else if is_usage_not_included(&error) {
                        response_error = ApiError::UsageNotIncluded;
                    } else {
                        let delay = try_parse_retry_after(&error);
                        let message = error.message.unwrap_or_default();
                        response_error = ApiError::Retryable { message, delay };
                    }
                }
                return Err(ResponsesEventError::Api(response_error));
            }

            return Err(ResponsesEventError::Api(ApiError::Stream(
                "response.failed event received".into(),
            )));
        }
        "response.completed" => {
            if let Some(resp_val) = event.response {
                match serde_json::from_value::<ResponseCompleted>(resp_val) {
                    Ok(resp) => {
                        return Ok(Some(ResponseEvent::Completed {
                            response_id: resp.id,
                            token_usage: resp.usage.map(Into::into),
                        }));
                    }
                    Err(err) => {
                        let error = format!("failed to parse ResponseCompleted: {err}");
                        debug!("{error}");
                        return Err(ResponsesEventError::Api(ApiError::Stream(error)));
                    }
                }
            }
        }
        "response.done" => {
            if let Some(resp_val) = event.response {
                match serde_json::from_value::<ResponseDone>(resp_val) {
                    Ok(resp) => {
                        return Ok(Some(ResponseEvent::Completed {
                            response_id: resp.id.unwrap_or_default(),
                            token_usage: resp.usage.map(Into::into),
                        }));
                    }
                    Err(err) => {
                        let error = format!("failed to parse ResponseCompleted: {err}");
                        debug!("{error}");
                        return Err(ResponsesEventError::Api(ApiError::Stream(error)));
                    }
                }
            }

            debug!("response.done missing response payload");
            return Ok(Some(ResponseEvent::Completed {
                response_id: String::new(),
                token_usage: None,
            }));
        }
        "response.output_item.added" => {
            if let Some(item_val) = event.item {
                if let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) {
                    return Ok(Some(ResponseEvent::OutputItemAdded(item)));
                }
                debug!("failed to parse ResponseItem from output_item.added");
            }
        }
        "response.reasoning_summary_part.added" => {
            if let Some(summary_index) = event.summary_index {
                return Ok(Some(ResponseEvent::ReasoningSummaryPartAdded {
                    summary_index,
                }));
            }
        }
        _ => {
            trace!("unhandled responses event: {}", event.kind);
        }
    }

    Ok(None)
}
pub async fn process_sse(
    stream: ByteStream,
    tx_event: mpsc::Sender<Result<ResponseEvent, ApiError>>,
@@ -143,7 +290,7 @@ pub async fn process_sse(
    telemetry: Option<Arc<dyn SseTelemetry>>,
) {
    let mut stream = stream.eventsource();
    let mut response_completed: Option<ResponseCompleted> = None;
    let mut response_completed: Option<ResponseEvent> = None;
    let mut response_error: Option<ApiError> = None;

    loop {
@@ -161,11 +308,7 @@ pub async fn process_sse(
            }
            Ok(None) => {
                match response_completed.take() {
                    Some(ResponseCompleted { id, usage }) => {
                        let event = ResponseEvent::Completed {
                            response_id: id,
                            token_usage: usage.map(Into::into),
                        };
                    Some(event) => {
                        let _ = tx_event.send(Ok(event)).await;
                    }
                    None => {
@@ -188,7 +331,7 @@ pub async fn process_sse(
        let raw = sse.data.clone();
        trace!("SSE event: {raw}");

        let event: SseEvent = match serde_json::from_str(&sse.data) {
        let event: ResponsesStreamEvent = match serde_json::from_str(&sse.data) {
            Ok(event) => event,
            Err(e) => {
                debug!("Failed to parse SSE event: {e}, data: {}", &sse.data);
@@ -196,115 +339,19 @@ pub async fn process_sse(
            }
        };

        match event.kind.as_str() {
            "response.output_item.done" => {
                let Some(item_val) = event.item else { continue };
                let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else {
                    debug!("failed to parse ResponseItem from output_item.done");
                    continue;
                };

                let event = ResponseEvent::OutputItemDone(item);
                if tx_event.send(Ok(event)).await.is_err() {
        match process_responses_event(event) {
            Ok(Some(event)) => {
                if matches!(event, ResponseEvent::Completed { .. }) {
                    response_completed = Some(event);
                } else if tx_event.send(Ok(event)).await.is_err() {
                    return;
                }
            }
            "response.output_text.delta" => {
                if let Some(delta) = event.delta {
                    let event = ResponseEvent::OutputTextDelta(delta);
                    if tx_event.send(Ok(event)).await.is_err() {
                        return;
                    }
                }
            Ok(None) => {}
            Err(error) => {
                response_error = Some(error.into_api_error());
            }
            "response.reasoning_summary_text.delta" => {
                if let (Some(delta), Some(summary_index)) = (event.delta, event.summary_index) {
                    let event = ResponseEvent::ReasoningSummaryDelta {
                        delta,
                        summary_index,
                    };
                    if tx_event.send(Ok(event)).await.is_err() {
                        return;
                    }
                }
            }
            "response.reasoning_text.delta" => {
                if let (Some(delta), Some(content_index)) = (event.delta, event.content_index) {
                    let event = ResponseEvent::ReasoningContentDelta {
                        delta,
                        content_index,
                    };
                    if tx_event.send(Ok(event)).await.is_err() {
                        return;
                    }
                }
            }
            "response.created" => {
                if event.response.is_some() {
                    let _ = tx_event.send(Ok(ResponseEvent::Created {})).await;
                }
            }
            "response.failed" => {
                if let Some(resp_val) = event.response {
                    response_error =
                        Some(ApiError::Stream("response.failed event received".into()));

                    if let Some(error) = resp_val.get("error")
                        && let Ok(error) = serde_json::from_value::<Error>(error.clone())
                    {
                        if is_context_window_error(&error) {
                            response_error = Some(ApiError::ContextWindowExceeded);
                        } else if is_quota_exceeded_error(&error) {
                            response_error = Some(ApiError::QuotaExceeded);
                        } else if is_usage_not_included(&error) {
                            response_error = Some(ApiError::UsageNotIncluded);
                        } else {
                            let delay = try_parse_retry_after(&error);
                            let message = error.message.clone().unwrap_or_default();
                            response_error = Some(ApiError::Retryable { message, delay });
                        }
                    }
                }
            }
            "response.completed" => {
                if let Some(resp_val) = event.response {
                    match serde_json::from_value::<ResponseCompleted>(resp_val) {
                        Ok(r) => {
                            response_completed = Some(r);
                        }
                        Err(e) => {
                            let error = format!("failed to parse ResponseCompleted: {e}");
                            debug!(error);
                            response_error = Some(ApiError::Stream(error));
                            continue;
                        }
                    };
                };
            }
            "response.output_item.added" => {
                let Some(item_val) = event.item else { continue };
                let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else {
                    debug!("failed to parse ResponseItem from output_item.done");
                    continue;
                };

                let event = ResponseEvent::OutputItemAdded(item);
                if tx_event.send(Ok(event)).await.is_err() {
                    return;
                }
            }
            "response.reasoning_summary_part.added" => {
                if let Some(summary_index) = event.summary_index {
                    let event = ResponseEvent::ReasoningSummaryPartAdded { summary_index };
                    if tx_event.send(Ok(event)).await.is_err() {
                        return;
                    }
                }
            }
            _ => {
                trace!("unhandled SSE event: {:#?}", event.kind);
            }
        }
    };
}
}
@@ -501,6 +548,65 @@ mod tests {
        }
    }

    #[tokio::test]
    async fn response_done_emits_completed() {
        let done = json!({
            "type": "response.done",
            "response": {
                "usage": {
                    "input_tokens": 1,
                    "input_tokens_details": null,
                    "output_tokens": 2,
                    "output_tokens_details": null,
                    "total_tokens": 3
                }
            }
        })
        .to_string();

        let sse1 = format!("event: response.done\ndata: {done}\n\n");

        let events = collect_events(&[sse1.as_bytes()]).await;

        assert_eq!(events.len(), 1);

        match &events[0] {
            Ok(ResponseEvent::Completed {
                response_id,
                token_usage,
            }) => {
                assert_eq!(response_id, "");
                assert!(token_usage.is_some());
            }
            other => panic!("unexpected event: {other:?}"),
        }
    }

    #[tokio::test]
    async fn response_done_without_payload_emits_completed() {
        let done = json!({
            "type": "response.done"
        })
        .to_string();

        let sse1 = format!("event: response.done\ndata: {done}\n\n");

        let events = collect_events(&[sse1.as_bytes()]).await;

        assert_eq!(events.len(), 1);

        match &events[0] {
            Ok(ResponseEvent::Completed {
                response_id,
                token_usage,
            }) => {
                assert_eq!(response_id, "");
                assert!(token_usage.is_none());
            }
            other => panic!("unexpected event: {other:?}"),
        }
    }

    #[tokio::test]
    async fn error_when_error_event() {
        let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5.1 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#;
@@ -1,18 +1,52 @@
//! OSS provider utilities shared between TUI and exec.

use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::WireApi;
use codex_core::config::Config;
use codex_core::protocol::DeprecationNoticeEvent;
use std::io;

/// Returns the default model for a given OSS provider.
pub fn get_default_model_for_oss_provider(provider_id: &str) -> Option<&'static str> {
    match provider_id {
        LMSTUDIO_OSS_PROVIDER_ID => Some(codex_lmstudio::DEFAULT_OSS_MODEL),
        OLLAMA_OSS_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
        OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
        _ => None,
    }
}

/// Returns a deprecation notice if Ollama doesn't support the responses wire API.
pub async fn ollama_chat_deprecation_notice(
    config: &Config,
) -> io::Result<Option<DeprecationNoticeEvent>> {
    if config.model_provider_id != OLLAMA_OSS_PROVIDER_ID
        || config.model_provider.wire_api != WireApi::Responses
    {
        return Ok(None);
    }

    if let Some(detection) = codex_ollama::detect_wire_api(&config.model_provider).await?
        && detection.wire_api == WireApi::Chat
    {
        let version_suffix = detection
            .version
            .as_ref()
            .map(|version| format!(" (version {version})"))
            .unwrap_or_default();
        let summary = format!(
            "Your Ollama server{version_suffix} doesn't support the Responses API. Either update Ollama or set `oss_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"` (or `model_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"`) in your config.toml to use the \"chat\" wire API. Support for the \"chat\" wire API is deprecated and will soon be removed."
        );
        return Ok(Some(DeprecationNoticeEvent {
            summary,
            details: None,
        }));
    }

    Ok(None)
}

/// Ensures the specified OSS provider is ready (models downloaded, service reachable).
pub async fn ensure_oss_provider_ready(
    provider_id: &str,
@@ -24,7 +58,7 @@ pub async fn ensure_oss_provider_ready(
            .await
            .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
        }
        OLLAMA_OSS_PROVIDER_ID => {
        OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
            codex_ollama::ensure_oss_ready(config)
                .await
                .map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
@@ -20,15 +20,18 @@ codex_rust_crate(
        "//codex-rs/apply-patch:apply_patch_tool_instructions.md",
        "prompt.md",
    ],
    # This is a bit of a hack, but empirically, some of our integration tests
    # are relying on the presence of this file as a repo root marker. When
    # running tests locally, this "just works," but in remote execution,
    # the working directory is different and so the file is not found unless it
    # is explicitly added as test data.
    #
    # TODO(aibrahim): Update the tests so that `just bazel-remote-test` succeeds
    # without this workaround.
    test_data_extra = ["//:AGENTS.md"],
    test_data_extra = [
        "config.schema.json",
        # This is a bit of a hack, but empirically, some of our integration tests
        # are relying on the presence of this file as a repo root marker. When
        # running tests locally, this "just works," but in remote execution,
        # the working directory is different and so the file is not found unless it
        # is explicitly added as test data.
        #
        # TODO(aibrahim): Update the tests so that `just bazel-remote-test`
        # succeeds without this workaround.
        "//:AGENTS.md",
    ],
    integration_deps_extra = ["//codex-rs/core/tests/common:common"],
    test_tags = ["no-sandbox"],
    extra_binaries = [
@@ -9,17 +9,22 @@ doctest = false
name = "codex_core"
path = "src/lib.rs"

[[bin]]
name = "codex-write-config-schema"
path = "src/bin/config_schema.rs"

[lints]
workspace = true

[dependencies]
anyhow = { workspace = true }
arc-swap = "1.7.1"
async-channel = { workspace = true }
async-trait = { workspace = true }
arc-swap = "1.7.1"
base64 = { workspace = true }
chardetng = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
clap = { workspace = true, features = ["derive"] }
codex-api = { workspace = true }
codex-app-server-protocol = { workspace = true }
codex-apply-patch = { workspace = true }
@@ -46,6 +51,7 @@ futures = { workspace = true }
http = { workspace = true }
include_dir = { workspace = true }
indexmap = { workspace = true }
indoc = { workspace = true }
keyring = { workspace = true, features = ["crypto-rust"] }
libc = { workspace = true }
mcp-types = { workspace = true }
@@ -55,6 +61,7 @@ rand = { workspace = true }
regex = { workspace = true }
regex-lite = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
@@ -122,8 +129,12 @@ keyring = { workspace = true, features = ["sync-secret-service"] }
assert_cmd = { workspace = true }
assert_matches = { workspace = true }
codex-arg0 = { workspace = true }
codex-core = { path = ".", default-features = false, features = ["deterministic_process_ids"] }
codex-otel = { workspace = true, features = ["disable-default-metrics-exporter"] }
codex-core = { path = ".", default-features = false, features = [
    "deterministic_process_ids",
] }
codex-otel = { workspace = true, features = [
    "disable-default-metrics-exporter",
] }
codex-utils-cargo-bin = { workspace = true }
core_test_support = { workspace = true }
ctor = { workspace = true }
1450  codex-rs/core/config.schema.json  (new file)
File diff suppressed because it is too large
@@ -25,43 +25,6 @@ When using the planning tool:
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, one-sentence explanation for why you need escalated permissions in the justification parameter

## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
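As a rough illustration of the decision space the removed section describes — the types and rule below are invented for this note and are not the harness's actual implementation — the write-command case reduces to:

#[derive(PartialEq)]
enum SandboxMode {
    ReadOnly,
    WorkspaceWrite,
    DangerFullAccess,
}

#[derive(PartialEq)]
enum ApprovalPolicy {
    Untrusted,
    OnFailure,
    OnRequest,
    Never,
}

// Toy rule for "does a write command pause for user approval?".
fn write_needs_approval(
    sandbox: &SandboxMode,
    policy: &ApprovalPolicy,
    writes_outside_workspace: bool,
) -> bool {
    if *policy == ApprovalPolicy::Never {
        return false; // never ask; work within the sandbox instead
    }
    match sandbox {
        SandboxMode::DangerFullAccess => false,
        SandboxMode::ReadOnly => true, // anything that isn't a read escalates
        SandboxMode::WorkspaceWrite => writes_outside_workspace,
    }
}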
@@ -25,43 +25,6 @@ When using the planning tool:
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, one-sentence explanation for why you need escalated permissions in the justification parameter

## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
@@ -159,43 +159,6 @@ If completing the user's task requires writing or modifying files, your code and
- Do not use one-letter variable names unless explicitly requested.
- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for escalating in the tool definition.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters. Within this harness, prefer requesting approval via the tool over asking in natural language.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, one-sentence explanation for why you need escalated permissions in the justification parameter

## Validating your work

If the codebase has tests or the ability to build or run, consider using them to verify changes once your work is complete.
@@ -133,43 +133,6 @@ If completing the user's task requires writing or modifying files, your code and
- Do not use one-letter variable names unless explicitly requested.
- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for escalating in the tool definition.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, one-sentence explanation for why you need escalated permissions in the justification parameter

## Validating your work

If the codebase has tests, or the ability to build or run tests, consider using them to verify changes once your work is complete.
@@ -25,43 +25,6 @@ When using the planning tool:
- Do not make single-step plans.
- When you have made a plan, update it after having performed one of the sub-tasks that you shared on the plan.

## Codex CLI harness, sandboxing, and approvals

The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.

Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
- **read-only**: The sandbox only permits reading files.
- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
- **danger-full-access**: No filesystem sandboxing - all commands are permitted.

Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
- **restricted**: Requires approval
- **enabled**: No approval needed

Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are
- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)

When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.

Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to "never", in which case never ask for approvals.

When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, one-sentence explanation for why you need escalated permissions in the justification parameter

## Special user requests

- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
@@ -146,41 +146,6 @@ If completing the user's task requires writing or modifying files, your code and
- Do not use one-letter variable names unless explicitly requested.
- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor.

## Sandbox and approvals

The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from.

Filesystem sandboxing prevents you from editing files without user approval. The options are:

- **read-only**: You can only read files.
- **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it.
- **danger-full-access**: No filesystem sandboxing.

Network sandboxing prevents you from accessing the network without approval. Options are

- **restricted**
- **enabled**

Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are

- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:

- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (For all of these, you should weigh alternative paths that do not require approval.)

Note that when sandboxing is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing ON, and approval on-failure.

## Validating your work

If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete.
@@ -146,41 +146,6 @@ If completing the user's task requires writing or modifying files, your code and
- Do not use one-letter variable names unless explicitly requested.
- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor.

## Sandbox and approvals

The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from.

Filesystem sandboxing prevents you from editing files without user approval. The options are:

- **read-only**: You can only read files.
- **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it.
- **danger-full-access**: No filesystem sandboxing.

Network sandboxing prevents you from accessing the network without approval. Options are

- **restricted**
- **enabled**

Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are

- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.

When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:

- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
- (For all of these, you should weigh alternative paths that do not require approval.)

Note that when sandboxing is set to read-only, you'll need to request approval for any command that isn't a read.

You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing ON, and approval on-failure.

## Validating your work

If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete.
@@ -57,7 +57,7 @@ impl AgentControl {
        prompt: String,
    ) -> CodexResult<String> {
        let state = self.upgrade()?;
        state
        let result = state
            .send_op(
                agent_id,
                Op::UserInput {
@@ -65,13 +65,19 @@ impl AgentControl {
                    final_output_json_schema: None,
                },
            )
            .await
            .await;
        if matches!(result, Err(CodexErr::InternalAgentDied)) {
            let _ = state.remove_thread(&agent_id).await;
        }
        result
    }

    /// Submit a shutdown request to an existing agent thread.
    pub(crate) async fn shutdown_agent(&self, agent_id: ThreadId) -> CodexResult<String> {
        let state = self.upgrade()?;
        state.send_op(agent_id, Op::Shutdown {}).await
        let result = state.send_op(agent_id, Op::Shutdown {}).await;
        let _ = state.remove_thread(&agent_id).await;
        result
    }

    #[allow(dead_code)] // Will be used for collab tools.
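The hunk above adds eviction of dead agent threads after a failed send. A standalone sketch of the same pattern, with invented stand-in types (the real code uses `CodexErr::InternalAgentDied`, `ThreadId`, and async ops):

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum SendError {
    AgentDied,
}

#[derive(Default)]
struct Registry {
    threads: HashMap<u64, String>, // thread id -> per-agent state
}

impl Registry {
    // Stand-in for send_op: here every send fails as if the agent crashed.
    fn send_op(&self, _id: u64) -> Result<String, SendError> {
        Err(SendError::AgentDied)
    }

    fn remove_thread(&mut self, id: u64) {
        self.threads.remove(&id);
    }

    // Mirrors the diff: forward the op, but evict the registry entry when
    // the agent is gone so later calls don't reuse a dead handle.
    fn send_user_input(&mut self, id: u64) -> Result<String, SendError> {
        let result = self.send_op(id);
        if matches!(result, Err(SendError::AgentDied)) {
            self.remove_thread(id);
        }
        result
    }
}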
@@ -1,5 +1,6 @@
use chrono::DateTime;
use chrono::Utc;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use sha2::Digest;
@@ -21,7 +22,7 @@ use codex_keyring_store::DefaultKeyringStore;
use codex_keyring_store::KeyringStore;

/// Determine where Codex should store CLI auth credentials.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AuthCredentialsStoreMode {
    #[default]

20  codex-rs/core/src/bin/config_schema.rs  (new file)
@@ -0,0 +1,20 @@
use anyhow::Result;
use clap::Parser;
use std::path::PathBuf;

/// Generate the JSON Schema for `config.toml` and write it to `config.schema.json`.
#[derive(Parser)]
#[command(name = "codex-write-config-schema")]
struct Args {
    #[arg(short, long, value_name = "PATH")]
    out: Option<PathBuf>,
}

fn main() -> Result<()> {
    let args = Args::parse();
    let out_path = args
        .out
        .unwrap_or_else(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("config.schema.json"));
    codex_core::config::schema::write_config_schema(&out_path)?;
    Ok(())
}
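A natural companion to this binary is a freshness check that fails when the checked-in schema drifts from the code; a minimal sketch under the assumption that `write_config_schema` is deterministic (this test is not part of the diff):

#[cfg(test)]
mod schema_freshness {
    use std::fs;
    use std::path::PathBuf;

    #[test]
    fn checked_in_schema_matches_generator() {
        let checked_in = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("config.schema.json");
        let regenerated = std::env::temp_dir().join("config.schema.regen.json");

        // Regenerate into a temp file and compare against the committed copy.
        codex_core::config::schema::write_config_schema(&regenerated).expect("generate schema");
        let expected = fs::read_to_string(&regenerated).expect("read regenerated schema");
        let actual = fs::read_to_string(&checked_in).expect("read checked-in schema");
        assert_eq!(actual, expected, "config.schema.json is stale; rerun the generator");
    }
}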
@@ -1,5 +1,6 @@
use std::sync::Arc;

use crate::api_bridge::CoreAuthProvider;
use crate::api_bridge::auth_provider_from_auth;
use crate::api_bridge::map_api_error;
use crate::auth::UnauthorizedRecovery;
@@ -10,12 +11,18 @@ use codex_api::CompactionInput as ApiCompactionInput;
use codex_api::Prompt as ApiPrompt;
use codex_api::RequestTelemetry;
use codex_api::ReqwestTransport;
use codex_api::ResponseAppendWsRequest;
use codex_api::ResponseCreateWsRequest;
use codex_api::ResponseStream as ApiResponseStream;
use codex_api::ResponsesClient as ApiResponsesClient;
use codex_api::ResponsesOptions as ApiResponsesOptions;
use codex_api::ResponsesWebsocketClient as ApiWebSocketResponsesClient;
use codex_api::ResponsesWebsocketConnection as ApiWebSocketConnection;
use codex_api::SseTelemetry;
use codex_api::TransportError;
use codex_api::build_conversation_headers;
use codex_api::common::Reasoning;
use codex_api::common::ResponsesWsRequest;
use codex_api::create_text_param_for_request;
use codex_api::error::ApiError;
use codex_api::requests::responses::Compression;
@@ -57,8 +64,8 @@ use crate::model_provider_info::WireApi;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;

#[derive(Debug, Clone)]
pub struct ModelClient {
#[derive(Debug)]
struct ModelClientState {
    config: Arc<Config>,
    auth_manager: Option<Arc<AuthManager>>,
    model_info: ModelInfo,
@@ -70,6 +77,17 @@ pub struct ModelClient {
    session_source: SessionSource,
}

#[derive(Debug, Clone)]
pub struct ModelClient {
    state: Arc<ModelClientState>,
}

pub struct ModelClientSession {
    state: Arc<ModelClientState>,
    connection: Option<ApiWebSocketConnection>,
    websocket_last_items: Vec<ResponseItem>,
}
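The split above is the classic Arc-backed handle pattern: immutable configuration lives once behind an `Arc`, clones of the client copy only a pointer, and per-session mutable state (the websocket connection) stays outside the shared part. A self-contained sketch of the shape (names invented for illustration):

use std::sync::Arc;

#[derive(Debug)]
struct SharedState {
    endpoint: String, // immutable, shared by every handle
}

#[derive(Debug, Clone)] // cloning copies one Arc pointer, not the state
struct Handle {
    state: Arc<SharedState>,
}

struct HandleSession {
    state: Arc<SharedState>,
    connection: Option<String>, // per-session mutable stand-in
}

impl Handle {
    fn new(endpoint: String) -> Self {
        Self { state: Arc::new(SharedState { endpoint }) }
    }

    fn new_session(&self) -> HandleSession {
        HandleSession { state: Arc::clone(&self.state), connection: None }
    }
}

fn main() {
    let handle = Handle::new("wss://example.invalid".to_string());
    let mut session = handle.new_session();
    session.connection = Some(format!("connected to {}", session.state.endpoint));
}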

#[allow(clippy::too_many_arguments)]
impl ModelClient {
    pub fn new(
@@ -84,20 +102,32 @@ impl ModelClient {
        session_source: SessionSource,
    ) -> Self {
        Self {
            config,
            auth_manager,
            model_info,
            otel_manager,
            provider,
            conversation_id,
            effort,
            summary,
            session_source,
            state: Arc::new(ModelClientState {
                config,
                auth_manager,
                model_info,
                otel_manager,
                provider,
                conversation_id,
                effort,
                summary,
                session_source,
            }),
        }
    }

    pub fn new_session(&self) -> ModelClientSession {
        ModelClientSession {
            state: Arc::clone(&self.state),
            connection: None,
            websocket_last_items: Vec::new(),
        }
    }
}

impl ModelClient {
    pub fn get_model_context_window(&self) -> Option<i64> {
        let model_info = self.get_model_info();
        let model_info = &self.state.model_info;
        let effective_context_window_percent = model_info.effective_context_window_percent;
        model_info.context_window.map(|context_window| {
            context_window.saturating_mul(effective_context_window_percent) / 100
@@ -105,39 +135,290 @@ impl ModelClient {
    }

    pub fn config(&self) -> Arc<Config> {
        Arc::clone(&self.config)
        Arc::clone(&self.state.config)
    }

    pub fn provider(&self) -> &ModelProviderInfo {
        &self.provider
        &self.state.provider
    }

    pub fn get_provider(&self) -> ModelProviderInfo {
        self.state.provider.clone()
    }

    pub fn get_otel_manager(&self) -> OtelManager {
        self.state.otel_manager.clone()
    }

    pub fn get_session_source(&self) -> SessionSource {
        self.state.session_source.clone()
    }

    /// Returns the currently configured model slug.
    pub fn get_model(&self) -> String {
        self.state.model_info.slug.clone()
    }

    pub fn get_model_info(&self) -> ModelInfo {
        self.state.model_info.clone()
    }

    /// Returns the current reasoning effort setting.
    pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
        self.state.effort
    }

    /// Returns the current reasoning summary setting.
    pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
        self.state.summary
    }

    pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> {
        self.state.auth_manager.clone()
    }

    /// Compacts the current conversation history using the Compact endpoint.
    ///
    /// This is a unary call (no streaming) that returns a new list of
    /// `ResponseItem`s representing the compacted transcript.
    pub async fn compact_conversation_history(&self, prompt: &Prompt) -> Result<Vec<ResponseItem>> {
        if prompt.input.is_empty() {
            return Ok(Vec::new());
        }
        let auth_manager = self.state.auth_manager.clone();
        let auth = match auth_manager.as_ref() {
            Some(manager) => manager.auth().await,
            None => None,
        };
        let api_provider = self
            .state
            .provider
            .to_api_provider(auth.as_ref().map(|a| a.mode))?;
        let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
        let transport = ReqwestTransport::new(build_reqwest_client());
        let request_telemetry = self.build_request_telemetry();
        let client = ApiCompactClient::new(transport, api_provider, api_auth)
            .with_telemetry(Some(request_telemetry));

        let instructions = prompt
            .get_full_instructions(&self.state.model_info)
            .into_owned();
        let payload = ApiCompactionInput {
            model: &self.state.model_info.slug,
            input: &prompt.input,
            instructions: &instructions,
        };

        let mut extra_headers = ApiHeaderMap::new();
        if let SessionSource::SubAgent(sub) = &self.state.session_source {
            let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub {
                label.clone()
            } else {
                serde_json::to_value(sub)
                    .ok()
                    .and_then(|v| v.as_str().map(std::string::ToString::to_string))
                    .unwrap_or_else(|| "other".to_string())
            };
            if let Ok(val) = HeaderValue::from_str(&subagent) {
                extra_headers.insert("x-openai-subagent", val);
            }
        }

        client
            .compact_input(&payload, extra_headers)
            .await
            .map_err(map_api_error)
    }
}

impl ModelClientSession {
    /// Streams a single model turn using either the Responses or Chat
    /// Completions wire API, depending on the configured provider.
    ///
    /// For Chat providers, the underlying stream is optionally aggregated
    /// based on the `show_raw_agent_reasoning` flag in the config.
    pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> {
        match self.provider.wire_api {
    pub async fn stream(&mut self, prompt: &Prompt) -> Result<ResponseStream> {
        match self.state.provider.wire_api {
            WireApi::Responses => self.stream_responses_api(prompt).await,
            WireApi::ResponsesWebsocket => self.stream_responses_websocket(prompt).await,
            WireApi::Chat => {
                let api_stream = self.stream_chat_completions(prompt).await?;

                if self.config.show_raw_agent_reasoning {
                if self.state.config.show_raw_agent_reasoning {
                    Ok(map_response_stream(
                        api_stream.streaming_mode(),
                        self.otel_manager.clone(),
                        self.state.otel_manager.clone(),
                    ))
                } else {
                    Ok(map_response_stream(
                        api_stream.aggregate(),
                        self.otel_manager.clone(),
                        self.state.otel_manager.clone(),
                    ))
                }
            }
        }
    }

    fn build_responses_request(&self, prompt: &Prompt) -> Result<ApiPrompt> {
        let model_info = self.state.model_info.clone();
        let instructions = prompt.get_full_instructions(&model_info).into_owned();
        let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
        Ok(build_api_prompt(prompt, instructions, tools_json))
    }

    fn build_responses_options(
        &self,
        prompt: &Prompt,
        compression: Compression,
    ) -> ApiResponsesOptions {
        let model_info = &self.state.model_info;

        let default_reasoning_effort = model_info.default_reasoning_level;
        let reasoning = if model_info.supports_reasoning_summaries {
            Some(Reasoning {
                effort: self.state.effort.or(default_reasoning_effort),
                summary: if self.state.summary == ReasoningSummaryConfig::None {
                    None
                } else {
                    Some(self.state.summary)
                },
            })
        } else {
            None
        };

        let include = if reasoning.is_some() {
            vec!["reasoning.encrypted_content".to_string()]
        } else {
            Vec::new()
        };

        let verbosity = if model_info.support_verbosity {
            self.state
                .config
                .model_verbosity
                .or(model_info.default_verbosity)
        } else {
            if self.state.config.model_verbosity.is_some() {
                warn!(
                    "model_verbosity is set but ignored as the model does not support verbosity: {}",
                    model_info.slug
                );
            }
            None
        };

        let text = create_text_param_for_request(verbosity, &prompt.output_schema);
        let conversation_id = self.state.conversation_id.to_string();

        ApiResponsesOptions {
            reasoning,
            include,
            prompt_cache_key: Some(conversation_id.clone()),
            text,
            store_override: None,
            conversation_id: Some(conversation_id),
            session_source: Some(self.state.session_source.clone()),
            extra_headers: beta_feature_headers(&self.state.config),
            compression,
        }
    }

    fn get_incremental_items(&self, input_items: &[ResponseItem]) -> Option<Vec<ResponseItem>> {
        // Checks whether the current request input is an incremental append to the previous request.
        // If items in the new request contain all the items from the previous request we build
        // a response.append request otherwise we start with a fresh response.create request.
        let previous_len = self.websocket_last_items.len();
        let can_append = previous_len > 0
            && input_items.starts_with(&self.websocket_last_items)
            && previous_len < input_items.len();
        if can_append {
            Some(input_items[previous_len..].to_vec())
        } else {
            None
        }
    }
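The prefix check above decides between a `response.append` and a fresh `response.create`. The same logic on plain strings, as a runnable illustration (not the production types):

// Standalone illustration of the prefix check, using strings in place of
// ResponseItem values.
fn incremental_items(previous: &[String], input: &[String]) -> Option<Vec<String>> {
    let previous_len = previous.len();
    let can_append = previous_len > 0
        && input.starts_with(previous)
        && previous_len < input.len();
    can_append.then(|| input[previous_len..].to_vec())
}

fn main() {
    let previous = vec!["a".to_string(), "b".to_string()];
    // The new request repeats the old items and adds one: append just ["c"].
    let grown = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    assert_eq!(incremental_items(&previous, &grown), Some(vec!["c".to_string()]));
    // A rewritten history is not a prefix match: start a fresh request.
    let rewritten = vec!["a".to_string(), "x".to_string(), "c".to_string()];
    assert_eq!(incremental_items(&previous, &rewritten), None);
}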

    fn prepare_websocket_request(
        &self,
        api_prompt: &ApiPrompt,
        options: &ApiResponsesOptions,
    ) -> ResponsesWsRequest {
        if let Some(append_items) = self.get_incremental_items(&api_prompt.input) {
            return ResponsesWsRequest::ResponseAppend(ResponseAppendWsRequest {
                input: append_items,
            });
        }

        let ApiResponsesOptions {
            reasoning,
            include,
            prompt_cache_key,
            text,
            store_override,
            ..
        } = options;

        let store = store_override.unwrap_or(false);
        let payload = ResponseCreateWsRequest {
            model: self.state.model_info.slug.clone(),
            instructions: api_prompt.instructions.clone(),
            input: api_prompt.input.clone(),
            tools: api_prompt.tools.clone(),
            tool_choice: "auto".to_string(),
            parallel_tool_calls: api_prompt.parallel_tool_calls,
            reasoning: reasoning.clone(),
            store,
            stream: true,
            include: include.clone(),
            prompt_cache_key: prompt_cache_key.clone(),
            text: text.clone(),
        };

        ResponsesWsRequest::ResponseCreate(payload)
    }

    async fn websocket_connection(
        &mut self,
        api_provider: codex_api::Provider,
        api_auth: CoreAuthProvider,
        options: &ApiResponsesOptions,
    ) -> std::result::Result<&ApiWebSocketConnection, ApiError> {
        let needs_new = match self.connection.as_ref() {
            Some(conn) => conn.is_closed().await,
            None => true,
        };

        if needs_new {
            let mut headers = options.extra_headers.clone();
            headers.extend(build_conversation_headers(options.conversation_id.clone()));
            let new_conn: ApiWebSocketConnection =
                ApiWebSocketResponsesClient::new(api_provider, api_auth)
                    .connect(headers)
                    .await?;
            self.connection = Some(new_conn);
        }

        self.connection.as_ref().ok_or(ApiError::Stream(
            "websocket connection is unavailable".to_string(),
        ))
    }
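`websocket_connection` reuses a live connection and reconnects only when the old one is closed or absent. A simplified synchronous sketch of that reuse-unless-closed shape (the real method is async and returns a `Result`):

// Minimal sketch of the reuse-unless-closed pattern, with a toy connection
// type standing in for the websocket client.
struct Conn {
    closed: bool,
}

struct Session {
    connection: Option<Conn>,
}

impl Session {
    fn connection(&mut self) -> &Conn {
        let needs_new = match self.connection.as_ref() {
            Some(conn) => conn.closed,
            None => true,
        };
        if needs_new {
            // Only pay the connection cost when there is nothing to reuse.
            self.connection = Some(Conn { closed: false });
        }
        self.connection.as_ref().expect("just ensured a connection")
    }
}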

    fn responses_request_compression(&self, auth: Option<&crate::auth::CodexAuth>) -> Compression {
        if self
            .state
            .config
            .features
            .enabled(Feature::EnableRequestCompression)
            && auth.is_some_and(|auth| auth.mode == AuthMode::ChatGPT)
            && self.state.provider.is_openai()
        {
            Compression::Zstd
        } else {
            Compression::None
        }
    }

    /// Streams a turn via the OpenAI Chat Completions API.
    ///
    /// This path is only used when the provider is configured with
@@ -149,13 +430,13 @@ impl ModelClient {
            ));
        }

        let auth_manager = self.auth_manager.clone();
        let model_info = self.get_model_info();
        let auth_manager = self.state.auth_manager.clone();
        let model_info = self.state.model_info.clone();
        let instructions = prompt.get_full_instructions(&model_info).into_owned();
        let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
        let api_prompt = build_api_prompt(prompt, instructions, tools_json);
        let conversation_id = self.conversation_id.to_string();
        let session_source = self.session_source.clone();
        let conversation_id = self.state.conversation_id.to_string();
        let session_source = self.state.session_source.clone();

        let mut auth_recovery = auth_manager
            .as_ref()
@@ -166,9 +447,10 @@ impl ModelClient {
            None => None,
        };
        let api_provider = self
            .state
            .provider
            .to_api_provider(auth.as_ref().map(|a| a.mode))?;
        let api_auth = auth_provider_from_auth(auth.clone(), &self.provider)?;
        let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
        let transport = ReqwestTransport::new(build_reqwest_client());
        let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry();
        let client = ApiChatClient::new(transport, api_provider, api_auth)
@@ -176,7 +458,7 @@ impl ModelClient {

        let stream_result = client
            .stream_prompt(
                &self.get_model(),
                &self.state.model_info.slug,
                &api_prompt,
                Some(conversation_id.clone()),
                Some(session_source.clone()),
@@ -203,52 +485,14 @@ impl ModelClient {
|
||||
async fn stream_responses_api(&self, prompt: &Prompt) -> Result<ResponseStream> {
|
||||
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
|
||||
warn!(path, "Streaming from fixture");
|
||||
let stream = codex_api::stream_from_fixture(path, self.provider.stream_idle_timeout())
|
||||
.map_err(map_api_error)?;
|
||||
return Ok(map_response_stream(stream, self.otel_manager.clone()));
|
||||
let stream =
|
||||
codex_api::stream_from_fixture(path, self.state.provider.stream_idle_timeout())
|
||||
.map_err(map_api_error)?;
|
||||
return Ok(map_response_stream(stream, self.state.otel_manager.clone()));
|
||||
}
|
||||
|
||||
let auth_manager = self.auth_manager.clone();
|
||||
let model_info = self.get_model_info();
|
||||
let instructions = prompt.get_full_instructions(&model_info).into_owned();
|
||||
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
|
||||
|
||||
let default_reasoning_effort = model_info.default_reasoning_level;
|
||||
let reasoning = if model_info.supports_reasoning_summaries {
|
||||
Some(Reasoning {
|
||||
effort: self.effort.or(default_reasoning_effort),
|
||||
summary: if self.summary == ReasoningSummaryConfig::None {
|
||||
None
|
||||
} else {
|
||||
Some(self.summary)
|
||||
},
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let include: Vec<String> = if reasoning.is_some() {
|
||||
vec!["reasoning.encrypted_content".to_string()]
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
let verbosity = if model_info.support_verbosity {
|
||||
self.config.model_verbosity.or(model_info.default_verbosity)
|
||||
} else {
|
||||
if self.config.model_verbosity.is_some() {
|
||||
warn!(
|
||||
"model_verbosity is set but ignored as the model does not support verbosity: {}",
|
||||
model_info.slug
|
||||
);
|
||||
}
|
||||
None
|
||||
};
|
||||
|
||||
let text = create_text_param_for_request(verbosity, &prompt.output_schema);
|
||||
let api_prompt = build_api_prompt(prompt, instructions.clone(), tools_json);
|
||||
let conversation_id = self.conversation_id.to_string();
|
||||
let session_source = self.session_source.clone();
|
||||
let auth_manager = self.state.auth_manager.clone();
|
||||
let api_prompt = self.build_responses_request(prompt)?;
|
||||
|
||||
let mut auth_recovery = auth_manager
|
||||
.as_ref()
|
||||
@@ -259,47 +503,26 @@ impl ModelClient {
|
||||
None => None,
|
||||
};
|
||||
let api_provider = self
|
||||
.state
|
||||
.provider
|
||||
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
|
||||
let api_auth = auth_provider_from_auth(auth.clone(), &self.provider)?;
|
||||
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
|
||||
let transport = ReqwestTransport::new(build_reqwest_client());
|
||||
let (request_telemetry, sse_telemetry) = self.build_streaming_telemetry();
|
||||
let compression = if self
|
||||
.config
|
||||
.features
|
||||
.enabled(Feature::EnableRequestCompression)
|
||||
&& auth
|
||||
.as_ref()
|
||||
.is_some_and(|auth| auth.mode == AuthMode::ChatGPT)
|
||||
&& self.provider.is_openai()
|
||||
{
|
||||
Compression::Zstd
|
||||
} else {
|
||||
Compression::None
|
||||
};
|
||||
let compression = self.responses_request_compression(auth.as_ref());
|
||||
|
||||
let client = ApiResponsesClient::new(transport, api_provider, api_auth)
|
||||
.with_telemetry(Some(request_telemetry), Some(sse_telemetry));
|
||||
|
||||
let options = ApiResponsesOptions {
|
||||
reasoning: reasoning.clone(),
|
||||
include: include.clone(),
|
||||
prompt_cache_key: Some(conversation_id.clone()),
|
||||
text: text.clone(),
|
||||
store_override: None,
|
||||
conversation_id: Some(conversation_id.clone()),
|
||||
session_source: Some(session_source.clone()),
|
||||
extra_headers: beta_feature_headers(&self.config),
|
||||
compression,
|
||||
};
|
||||
let options = self.build_responses_options(prompt, compression);
|
||||
|
||||
let stream_result = client
|
||||
.stream_prompt(&self.get_model(), &api_prompt, options)
|
||||
.stream_prompt(&self.state.model_info.slug, &api_prompt, options)
|
||||
.await;
|
||||
|
||||
match stream_result {
|
||||
Ok(stream) => {
|
||||
return Ok(map_response_stream(stream, self.otel_manager.clone()));
|
||||
return Ok(map_response_stream(stream, self.state.otel_manager.clone()));
|
||||
}
|
||||
Err(ApiError::Transport(TransportError::Http { status, .. }))
|
||||
if status == StatusCode::UNAUTHORIZED =>
|
||||
@@ -312,106 +535,69 @@ impl ModelClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_provider(&self) -> ModelProviderInfo {
self.provider.clone()
}
/// Streams a turn via the Responses API over WebSocket transport.
async fn stream_responses_websocket(&mut self, prompt: &Prompt) -> Result<ResponseStream> {
let auth_manager = self.state.auth_manager.clone();
let api_prompt = self.build_responses_request(prompt)?;

pub fn get_otel_manager(&self) -> OtelManager {
self.otel_manager.clone()
}

pub fn get_session_source(&self) -> SessionSource {
self.session_source.clone()
}

/// Returns the currently configured model slug.
pub fn get_model(&self) -> String {
self.model_info.slug.clone()
}

pub fn get_model_info(&self) -> ModelInfo {
self.model_info.clone()
}

/// Returns the current reasoning effort setting.
pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> {
self.effort
}

/// Returns the current reasoning summary setting.
pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig {
self.summary
}

pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> {
self.auth_manager.clone()
}

/// Compacts the current conversation history using the Compact endpoint.
///
/// This is a unary call (no streaming) that returns a new list of
/// `ResponseItem`s representing the compacted transcript.
pub async fn compact_conversation_history(&self, prompt: &Prompt) -> Result<Vec<ResponseItem>> {
if prompt.input.is_empty() {
return Ok(Vec::new());
}
let auth_manager = self.auth_manager.clone();
let auth = match auth_manager.as_ref() {
Some(manager) => manager.auth().await,
None => None,
};
let api_provider = self
.provider
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.provider)?;
let transport = ReqwestTransport::new(build_reqwest_client());
let request_telemetry = self.build_request_telemetry();
let client = ApiCompactClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry));

let instructions = prompt
.get_full_instructions(&self.get_model_info())
.into_owned();
let payload = ApiCompactionInput {
model: &self.get_model(),
input: &prompt.input,
instructions: &instructions,
};

let mut extra_headers = ApiHeaderMap::new();
if let SessionSource::SubAgent(sub) = &self.session_source {
let subagent = if let crate::protocol::SubAgentSource::Other(label) = sub {
label.clone()
} else {
serde_json::to_value(sub)
.ok()
.and_then(|v| v.as_str().map(std::string::ToString::to_string))
.unwrap_or_else(|| "other".to_string())
let mut auth_recovery = auth_manager
.as_ref()
.map(super::auth::AuthManager::unauthorized_recovery);
loop {
let auth = match auth_manager.as_ref() {
Some(manager) => manager.auth().await,
None => None,
};
if let Ok(val) = HeaderValue::from_str(&subagent) {
extra_headers.insert("x-openai-subagent", val);
}
let api_provider = self
.state
.provider
.to_api_provider(auth.as_ref().map(|a| a.mode))?;
let api_auth = auth_provider_from_auth(auth.clone(), &self.state.provider)?;
let compression = self.responses_request_compression(auth.as_ref());

let options = self.build_responses_options(prompt, compression);
let request = self.prepare_websocket_request(&api_prompt, &options);

let connection = match self
.websocket_connection(api_provider.clone(), api_auth.clone(), &options)
.await
{
Ok(connection) => connection,
Err(ApiError::Transport(TransportError::Http { status, .. }))
if status == StatusCode::UNAUTHORIZED =>
{
handle_unauthorized(status, &mut auth_recovery).await?;
continue;
}
Err(err) => return Err(map_api_error(err)),
};

let stream_result = connection
.stream_request(request)
.await
.map_err(map_api_error)?;
self.websocket_last_items = api_prompt.input.clone();

return Ok(map_response_stream(
stream_result,
self.state.otel_manager.clone(),
));
}

client
.compact_input(&payload, extra_headers)
.await
.map_err(map_api_error)
}
}

impl ModelClient {
/// Builds request and SSE telemetry for streaming API calls (Chat/Responses).
fn build_streaming_telemetry(&self) -> (Arc<dyn RequestTelemetry>, Arc<dyn SseTelemetry>) {
let telemetry = Arc::new(ApiTelemetry::new(self.otel_manager.clone()));
let telemetry = Arc::new(ApiTelemetry::new(self.state.otel_manager.clone()));
let request_telemetry: Arc<dyn RequestTelemetry> = telemetry.clone();
let sse_telemetry: Arc<dyn SseTelemetry> = telemetry;
(request_telemetry, sse_telemetry)
}
}

impl ModelClient {
/// Builds request telemetry for unary API calls (e.g., Compact endpoint).
fn build_request_telemetry(&self) -> Arc<dyn RequestTelemetry> {
let telemetry = Arc::new(ApiTelemetry::new(self.otel_manager.clone()));
let telemetry = Arc::new(ApiTelemetry::new(self.state.otel_manager.clone()));
let request_telemetry: Arc<dyn RequestTelemetry> = telemetry;
request_telemetry
}

@@ -48,6 +48,7 @@ use codex_protocol::protocol::TurnAbortReason;
use codex_protocol::protocol::TurnContextItem;
use codex_protocol::protocol::TurnStartedEvent;
use codex_rmcp_client::ElicitationResponse;
use codex_rmcp_client::OAuthCredentialsStoreMode;
use futures::future::BoxFuture;
use futures::prelude::*;
use futures::stream::FuturesOrdered;
@@ -77,6 +78,7 @@ use tracing::warn;
use crate::ModelProviderInfo;
use crate::WireApi;
use crate::client::ModelClient;
use crate::client::ModelClientSession;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::compact::collect_user_messages;
@@ -84,6 +86,7 @@ use crate::config::Config;
use crate::config::Constrained;
use crate::config::ConstraintResult;
use crate::config::GhostSnapshotConfig;
use crate::config::types::McpServerConfig;
use crate::config::types::ShellEnvironmentPolicy;
use crate::context_manager::ContextManager;
use crate::environment_context::EnvironmentContext;
@@ -107,6 +110,7 @@ use crate::protocol::ErrorEvent;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::ExecApprovalRequestEvent;
use crate::protocol::McpServerRefreshConfig;
use crate::protocol::Op;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::ReasoningContentDeltaEvent;
@@ -148,7 +152,6 @@ use crate::tools::spec::ToolsConfig;
use crate::tools::spec::ToolsConfigParams;
use crate::turn_diff_tracker::TurnDiffTracker;
use crate::unified_exec::UnifiedExecProcessManager;
use crate::user_instructions::DeveloperInstructions;
use crate::user_instructions::UserInstructions;
use crate::user_notification::UserNotification;
use crate::util::backoff;
@@ -156,6 +159,7 @@ use codex_async_utils::OrCancelExt;
use codex_otel::OtelManager;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::models::ContentItem;
use codex_protocol::models::DeveloperInstructions;
use codex_protocol::models::ResponseInputItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
@@ -361,6 +365,7 @@ pub(crate) struct Session {
/// The set of enabled features should be invariant for the lifetime of the
/// session.
features: Features,
pending_mcp_server_refresh_config: Mutex<Option<McpServerRefreshConfig>>,
pub(crate) active_turn: Mutex<Option<ActiveTurn>>,
pub(crate) services: SessionServices,
next_internal_sub_id: AtomicU64,
@@ -685,7 +690,7 @@ impl Session {

let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
mcp_startup_cancellation_token: CancellationToken::new(),
mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()),
unified_exec_manager: UnifiedExecProcessManager::default(),
notifier: UserNotifier::new(config.notify.clone()),
rollout: Mutex::new(Some(rollout_recorder)),
@@ -706,6 +711,7 @@ impl Session {
agent_status,
state: Mutex::new(state),
features: config.features.clone(),
pending_mcp_server_refresh_config: Mutex::new(None),
active_turn: Mutex::new(None),
services,
next_internal_sub_id: AtomicU64::new(0),
@@ -742,16 +748,18 @@ impl Session {
codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(),
sandbox_cwd: session_configuration.cwd.clone(),
};
let cancel_token = sess.mcp_startup_cancellation_token().await;

sess.services
.mcp_connection_manager
.write()
.await
.initialize(
config.mcp_servers.clone(),
&config.mcp_servers,
config.mcp_oauth_credentials_store_mode,
auth_statuses.clone(),
tx_event.clone(),
sess.services.mcp_startup_cancellation_token.clone(),
cancel_token,
sandbox_state,
)
.await;
@@ -852,6 +860,11 @@ impl Session {
if persist && !rollout_items.is_empty() {
self.persist_rollout_items(&rollout_items).await;
}

// Append the current session's initial context after the reconstructed history.
let initial_context = self.build_initial_context(&turn_context);
self.record_conversation_items(&turn_context, &initial_context)
.await;
// Flush after seeding history and any persisted rollout copy.
self.flush_rollout().await;
}
@@ -1004,6 +1017,28 @@ impl Session {
)))
}

fn build_permissions_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
let prev = previous?;
if prev.sandbox_policy == next.sandbox_policy
&& prev.approval_policy == next.approval_policy
{
return None;
}

Some(
DeveloperInstructions::from_policy(
&next.sandbox_policy,
next.approval_policy,
&next.cwd,
)
.into(),
)
}

/// Persist the event to rollout and send it to clients.
pub(crate) async fn send_event(&self, turn_context: &TurnContext, msg: EventMsg) {
let legacy_source = msg.clone();
@@ -1333,8 +1368,16 @@ impl Session {
}

pub(crate) fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> {
let mut items = Vec::<ResponseItem>::with_capacity(3);
let mut items = Vec::<ResponseItem>::with_capacity(4);
let shell = self.user_shell();
items.push(
DeveloperInstructions::from_policy(
&turn_context.sandbox_policy,
turn_context.approval_policy,
&turn_context.cwd,
)
.into(),
);
if let Some(developer_instructions) = turn_context.developer_instructions.as_deref() {
items.push(DeveloperInstructions::new(developer_instructions.to_string()).into());
}
@@ -1349,8 +1392,6 @@ impl Session {
}
items.push(ResponseItem::from(EnvironmentContext::new(
Some(turn_context.cwd.clone()),
Some(turn_context.approval_policy),
Some(turn_context.sandbox_policy.clone()),
shell.as_ref().clone(),
)));
items
@@ -1567,6 +1608,17 @@ impl Session {
}
}

pub async fn has_pending_input(&self) -> bool {
let active = self.active_turn.lock().await;
match active.as_ref() {
Some(at) => {
let ts = at.turn_state.lock().await;
ts.has_pending_input()
}
None => false,
}
}

pub async fn list_resources(
&self,
server: &str,
@@ -1647,12 +1699,85 @@ impl Session {
Arc::clone(&self.services.user_shell)
}

async fn refresh_mcp_servers_if_requested(&self, turn_context: &TurnContext) {
let refresh_config = { self.pending_mcp_server_refresh_config.lock().await.take() };
let Some(refresh_config) = refresh_config else {
return;
};

let McpServerRefreshConfig {
mcp_servers,
mcp_oauth_credentials_store_mode,
} = refresh_config;

let mcp_servers =
match serde_json::from_value::<HashMap<String, McpServerConfig>>(mcp_servers) {
Ok(servers) => servers,
Err(err) => {
warn!("failed to parse MCP server refresh config: {err}");
return;
}
};
let store_mode = match serde_json::from_value::<OAuthCredentialsStoreMode>(
mcp_oauth_credentials_store_mode,
) {
Ok(mode) => mode,
Err(err) => {
warn!("failed to parse MCP OAuth refresh config: {err}");
return;
}
};

let auth_statuses = compute_auth_statuses(mcp_servers.iter(), store_mode).await;
let sandbox_state = SandboxState {
sandbox_policy: turn_context.sandbox_policy.clone(),
codex_linux_sandbox_exe: turn_context.codex_linux_sandbox_exe.clone(),
sandbox_cwd: turn_context.cwd.clone(),
};
let cancel_token = self.reset_mcp_startup_cancellation_token().await;

let mut refreshed_manager = McpConnectionManager::default();
refreshed_manager
.initialize(
&mcp_servers,
store_mode,
auth_statuses,
self.get_tx_event(),
cancel_token,
sandbox_state,
)
.await;

let mut manager = self.services.mcp_connection_manager.write().await;
*manager = refreshed_manager;
}

async fn mcp_startup_cancellation_token(&self) -> CancellationToken {
self.services
.mcp_startup_cancellation_token
.lock()
.await
.clone()
}

async fn reset_mcp_startup_cancellation_token(&self) -> CancellationToken {
let mut guard = self.services.mcp_startup_cancellation_token.lock().await;
guard.cancel();
let cancel_token = CancellationToken::new();
*guard = cancel_token.clone();
cancel_token
}

fn show_raw_agent_reasoning(&self) -> bool {
self.services.show_raw_agent_reasoning
}

async fn cancel_mcp_startup(&self) {
self.services.mcp_startup_cancellation_token.cancel();
self.services
.mcp_startup_cancellation_token
.lock()
.await
.cancel();
}
}

@@ -1710,6 +1835,9 @@ async fn submission_loop(sess: Arc<Session>, config: Arc<Config>, rx_sub: Receiv
Op::ListMcpTools => {
handlers::list_mcp_tools(&sess, &config, sub.id.clone()).await;
}
Op::RefreshMcpServers { config } => {
handlers::refresh_mcp_servers(&sess, config).await;
}
Op::ListCustomPrompts => {
handlers::list_custom_prompts(&sess, sub.id.clone()).await;
}
@@ -1778,6 +1906,7 @@ mod handlers {
use codex_protocol::protocol::EventMsg;
use codex_protocol::protocol::ListCustomPromptsResponseEvent;
use codex_protocol::protocol::ListSkillsResponseEvent;
use codex_protocol::protocol::McpServerRefreshConfig;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::ReviewDecision;
use codex_protocol::protocol::ReviewRequest;
@@ -1869,13 +1998,24 @@ mod handlers {

// Attempt to inject input into current task
if let Err(items) = sess.inject_input(items).await {
let mut update_items = Vec::new();
if let Some(env_item) =
sess.build_environment_update_item(previous_context.as_ref(), &current_context)
{
sess.record_conversation_items(&current_context, std::slice::from_ref(&env_item))
update_items.push(env_item);
}
if let Some(permissions_item) =
sess.build_permissions_update_item(previous_context.as_ref(), &current_context)
{
update_items.push(permissions_item);
}
if !update_items.is_empty() {
sess.record_conversation_items(&current_context, &update_items)
.await;
}

sess.refresh_mcp_servers_if_requested(&current_context)
.await;
sess.spawn_task(Arc::clone(&current_context), items, RegularTask)
.await;
*previous_context = Some(current_context);
@@ -2007,6 +2147,11 @@ mod handlers {
});
}

pub async fn refresh_mcp_servers(sess: &Arc<Session>, refresh_config: McpServerRefreshConfig) {
let mut guard = sess.pending_mcp_server_refresh_config.lock().await;
*guard = Some(refresh_config);
}

pub async fn list_mcp_tools(sess: &Session, config: &Arc<Config>, sub_id: String) {
let mcp_connection_manager = sess.services.mcp_connection_manager.read().await;
let snapshot = collect_mcp_snapshot_from_manager(
@@ -2191,6 +2336,7 @@ mod handlers {
review_request: ReviewRequest,
) {
let turn_context = sess.new_default_turn_with_sub_id(sub_id.clone()).await;
sess.refresh_mcp_servers_if_requested(&turn_context).await;
match resolve_review_request(review_request, turn_context.cwd.as_path()) {
Ok(resolved) => {
spawn_review_thread(
@@ -2397,6 +2543,8 @@ pub(crate) async fn run_turn(
// many turns, from the perspective of the user, it is a single turn.
let turn_diff_tracker = Arc::new(tokio::sync::Mutex::new(TurnDiffTracker::new()));

let mut client_session = turn_context.client.new_session();

loop {
// Note that pending_input would be something like a message the user
// submitted through the UI while the model was running. Though the UI
@@ -2427,6 +2575,7 @@ pub(crate) async fn run_turn(
Arc::clone(&sess),
Arc::clone(&turn_context),
Arc::clone(&turn_diff_tracker),
&mut client_session,
turn_input,
cancellation_token.child_token(),
)
@@ -2504,6 +2653,7 @@ async fn run_model_turn(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
turn_diff_tracker: SharedTurnDiffTracker,
client_session: &mut ModelClientSession,
input: Vec<ResponseItem>,
cancellation_token: CancellationToken,
) -> CodexResult<TurnRunResult> {
@@ -2544,6 +2694,7 @@ async fn run_model_turn(
Arc::clone(&router),
Arc::clone(&sess),
Arc::clone(&turn_context),
client_session,
Arc::clone(&turn_diff_tracker),
&prompt,
cancellation_token.child_token(),
@@ -2635,6 +2786,7 @@ async fn try_run_turn(
router: Arc<ToolRouter>,
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
client_session: &mut ModelClientSession,
turn_diff_tracker: SharedTurnDiffTracker,
prompt: &Prompt,
cancellation_token: CancellationToken,
@@ -2663,9 +2815,7 @@ async fn try_run_turn(
);

sess.persist_rollout_items(&[rollout_item]).await;
let mut stream = turn_context
.client
.clone()
let mut stream = client_session
.stream(prompt)
.instrument(trace_span!("stream_request"))
.or_cancel(&cancellation_token)
@@ -2767,6 +2917,9 @@ async fn try_run_turn(
.await;
should_emit_turn_diff = true;

needs_follow_up |= sess.has_pending_input().await;
error!("needs_follow_up: {needs_follow_up}");

break Ok(TurnRunResult {
needs_follow_up,
last_agent_message,
@@ -2943,7 +3096,7 @@ mod tests {
#[tokio::test]
async fn record_initial_history_reconstructs_resumed_transcript() {
let (session, turn_context) = make_session_and_context().await;
let (rollout_items, expected) = sample_rollout(&session, &turn_context);
let (rollout_items, mut expected) = sample_rollout(&session, &turn_context);

session
.record_initial_history(InitialHistory::Resumed(ResumedHistory {
@@ -2953,6 +3106,7 @@ mod tests {
}))
.await;

expected.extend(session.build_initial_context(&turn_context));
let history = session.state.lock().await.clone_history();
assert_eq!(expected, history.raw_items());
}
@@ -3037,12 +3191,13 @@ mod tests {
#[tokio::test]
async fn record_initial_history_reconstructs_forked_transcript() {
let (session, turn_context) = make_session_and_context().await;
let (rollout_items, expected) = sample_rollout(&session, &turn_context);
let (rollout_items, mut expected) = sample_rollout(&session, &turn_context);

session
.record_initial_history(InitialHistory::Forked(rollout_items))
.await;

expected.extend(session.build_initial_context(&turn_context));
let history = session.state.lock().await.clone_history();
assert_eq!(expected, history.raw_items());
}
@@ -3526,7 +3681,7 @@ mod tests {

let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
mcp_startup_cancellation_token: CancellationToken::new(),
mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()),
unified_exec_manager: UnifiedExecProcessManager::default(),
notifier: UserNotifier::new(None),
rollout: Mutex::new(None),
@@ -3558,6 +3713,7 @@ mod tests {
agent_status: agent_status_tx,
state: Mutex::new(state),
features: config.features.clone(),
pending_mcp_server_refresh_config: Mutex::new(None),
active_turn: Mutex::new(None),
services,
next_internal_sub_id: AtomicU64::new(0),
@@ -3620,7 +3776,7 @@ mod tests {

let services = SessionServices {
mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())),
mcp_startup_cancellation_token: CancellationToken::new(),
mcp_startup_cancellation_token: Mutex::new(CancellationToken::new()),
unified_exec_manager: UnifiedExecProcessManager::default(),
notifier: UserNotifier::new(None),
rollout: Mutex::new(None),
@@ -3652,6 +3808,7 @@ mod tests {
agent_status: agent_status_tx,
state: Mutex::new(state),
features: config.features.clone(),
pending_mcp_server_refresh_config: Mutex::new(None),
active_turn: Mutex::new(None),
services,
next_internal_sub_id: AtomicU64::new(0),
@@ -3660,6 +3817,48 @@ mod tests {
(session, turn_context, rx_event)
}

#[tokio::test]
async fn refresh_mcp_servers_is_deferred_until_next_turn() {
let (session, turn_context) = make_session_and_context().await;
let old_token = session.mcp_startup_cancellation_token().await;
assert!(!old_token.is_cancelled());

let mcp_oauth_credentials_store_mode =
serde_json::to_value(OAuthCredentialsStoreMode::Auto).expect("serialize store mode");
let refresh_config = McpServerRefreshConfig {
mcp_servers: json!({}),
mcp_oauth_credentials_store_mode,
};
{
let mut guard = session.pending_mcp_server_refresh_config.lock().await;
*guard = Some(refresh_config);
}

assert!(!old_token.is_cancelled());
assert!(
session
.pending_mcp_server_refresh_config
.lock()
.await
.is_some()
);

session
.refresh_mcp_servers_if_requested(&turn_context)
.await;

assert!(old_token.is_cancelled());
assert!(
session
.pending_mcp_server_refresh_config
.lock()
.await
.is_none()
);
let new_token = session.mcp_startup_cancellation_token().await;
assert!(!new_token.is_cancelled());
}

#[tokio::test]
async fn record_model_warning_appends_user_message() {
let (mut session, turn_context) = make_session_and_context().await;

@@ -1,46 +1,8 @@
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;

use crate::sandboxing::SandboxPermissions;

use crate::bash::parse_shell_lc_plain_commands;
use crate::is_safe_command::is_known_safe_command;
#[cfg(windows)]
#[path = "windows_dangerous_commands.rs"]
mod windows_dangerous_commands;

pub fn requires_initial_appoval(
policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
command: &[String],
sandbox_permissions: SandboxPermissions,
) -> bool {
if is_known_safe_command(command) {
return false;
}
match policy {
AskForApproval::Never | AskForApproval::OnFailure => false,
AskForApproval::OnRequest => {
// In DangerFullAccess or ExternalSandbox, only prompt if the command looks dangerous.
if matches!(
sandbox_policy,
SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. }
) {
return command_might_be_dangerous(command);
}

// In restricted sandboxes (ReadOnly/WorkspaceWrite), do not prompt for
// non‑escalated, non‑dangerous commands — let the sandbox enforce
// restrictions (e.g., block network/write) without a user prompt.
if sandbox_permissions.requires_escalated_permissions() {
return true;
}
command_might_be_dangerous(command)
}
AskForApproval::UnlessTrusted => !is_known_safe_command(command),
}
}

pub fn command_might_be_dangerous(command: &[String]) -> bool {
#[cfg(windows)]
{
@@ -86,7 +48,6 @@ fn is_dangerous_to_call_with_exec(command: &[String]) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use codex_protocol::protocol::NetworkAccess;

fn vec_str(items: &[&str]) -> Vec<String> {
items.iter().map(std::string::ToString::to_string).collect()
@@ -154,23 +115,4 @@ mod tests {
fn rm_f_is_dangerous() {
assert!(command_might_be_dangerous(&vec_str(&["rm", "-f", "/"])));
}

#[test]
fn external_sandbox_only_prompts_for_dangerous_commands() {
let external_policy = SandboxPolicy::ExternalSandbox {
network_access: NetworkAccess::Restricted,
};
assert!(!requires_initial_appoval(
AskForApproval::OnRequest,
&external_policy,
&vec_str(&["ls"]),
SandboxPermissions::UseDefault,
));
assert!(requires_initial_appoval(
AskForApproval::OnRequest,
&external_policy,
&vec_str(&["rm", "-rf", "/"]),
SandboxPermissions::UseDefault,
));
}
}

@@ -297,7 +297,8 @@ async fn drain_to_completed(
turn_context: &TurnContext,
prompt: &Prompt,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
let mut client_session = turn_context.client.new_session();
let mut stream = client_session.stream(prompt).await?;
loop {
let maybe_event = stream.next().await;
let Some(event) = maybe_event else {

@@ -37,11 +37,15 @@ impl From<ConstraintError> for std::io::Error {
}

type ConstraintValidator<T> = dyn Fn(&T) -> ConstraintResult<()> + Send + Sync;
/// A ConstraintNormalizer is a function which transforms a value into another of the same type.
/// `Constrained` uses normalizers to transform values to satisfy constraints or enforce values.
type ConstraintNormalizer<T> = dyn Fn(T) -> T + Send + Sync;

#[derive(Clone)]
pub struct Constrained<T> {
value: T,
validator: Arc<ConstraintValidator<T>>,
normalizer: Option<Arc<ConstraintNormalizer<T>>>,
}

impl<T: Send + Sync> Constrained<T> {
@@ -54,6 +58,23 @@ impl<T: Send + Sync> Constrained<T> {
Ok(Self {
value: initial_value,
validator,
normalizer: None,
})
}

/// normalized creates a `Constrained` value with a normalizer function and a validator that allows any value.
pub fn normalized(
initial_value: T,
normalizer: impl Fn(T) -> T + Send + Sync + 'static,
) -> ConstraintResult<Self> {
let validator: Arc<ConstraintValidator<T>> = Arc::new(|_| Ok(()));
let normalizer: Arc<ConstraintNormalizer<T>> = Arc::new(normalizer);
let normalized = normalizer(initial_value);
validator(&normalized)?;
Ok(Self {
value: normalized,
validator,
normalizer: Some(normalizer),
})
}

@@ -61,6 +82,7 @@ impl<T: Send + Sync> Constrained<T> {
Self {
value: initial_value,
validator: Arc::new(|_| Ok(())),
normalizer: None,
}
}

@@ -88,6 +110,11 @@ impl<T: Send + Sync> Constrained<T> {
}

pub fn set(&mut self, value: T) -> ConstraintResult<()> {
let value = if let Some(normalizer) = &self.normalizer {
normalizer(value)
} else {
value
};
(self.validator)(&value)?;
self.value = value;
Ok(())
@@ -143,6 +170,17 @@ mod tests {
assert_eq!(constrained.value(), 0);
}

#[test]
fn constrained_normalizer_applies_on_init_and_set() -> anyhow::Result<()> {
let mut constrained = Constrained::normalized(-1, |value| value.max(0))?;
assert_eq!(constrained.value(), 0);
constrained.set(-5)?;
assert_eq!(constrained.value(), 0);
constrained.set(10)?;
assert_eq!(constrained.value(), 10);
Ok(())
}

#[test]
fn constrained_new_rejects_invalid_initial_value() {
let result = Constrained::new(0, |value| {

@@ -2,6 +2,7 @@ use crate::auth::AuthCredentialsStoreMode;
use crate::config::types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config::types::History;
use crate::config::types::McpServerConfig;
use crate::config::types::McpServerTransportConfig;
use crate::config::types::Notice;
use crate::config::types::Notifications;
use crate::config::types::OtelConfig;
@@ -16,6 +17,8 @@ use crate::config::types::UriBasedFileOpener;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::LoaderOverrides;
use crate::config_loader::McpServerIdentity;
use crate::config_loader::McpServerRequirement;
use crate::config_loader::load_config_layers_state;
use crate::features::Feature;
use crate::features::FeatureOverrides;
@@ -24,6 +27,7 @@ use crate::features::FeaturesToml;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
use crate::model_provider_info::OLLAMA_OSS_PROVIDER_ID;
use crate::model_provider_info::built_in_model_providers;
use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
@@ -43,6 +47,7 @@ use codex_rmcp_client::OAuthCredentialsStoreMode;
use codex_utils_absolute_path::AbsolutePathBuf;
use codex_utils_absolute_path::AbsolutePathBufGuard;
use dirs::home_dir;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use similar::DiffableStr;
@@ -61,6 +66,7 @@ use toml_edit::DocumentMut;
mod constraint;
pub mod edit;
pub mod profile;
pub mod schema;
pub mod service;
pub mod types;
pub use constraint::Constrained;
@@ -257,7 +263,7 @@ pub struct Config {
pub cli_auth_credentials_store_mode: AuthCredentialsStoreMode,

/// Definition for MCP servers that Codex can reach out to for tool calls.
pub mcp_servers: HashMap<String, McpServerConfig>,
pub mcp_servers: Constrained<HashMap<String, McpServerConfig>>,

/// Preferred store for MCP OAuth credentials.
/// keyring: Use an OS-specific keyring service.
@@ -268,6 +274,11 @@ pub struct Config {
/// auto (default): keyring if available, otherwise file.
pub mcp_oauth_credentials_store_mode: OAuthCredentialsStoreMode,

/// Optional fixed port to use for the local HTTP callback server used during MCP OAuth login.
///
/// When unset, Codex will bind to an ephemeral port chosen by the OS.
pub mcp_oauth_callback_port: Option<u16>,

/// Combined provider map (defaults merged with user-defined overrides).
pub model_providers: HashMap<String, ModelProviderInfo>,

@@ -505,6 +516,59 @@ fn deserialize_config_toml_with_base(
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}

fn filter_mcp_servers_by_requirements(
mcp_servers: &mut HashMap<String, McpServerConfig>,
mcp_requirements: Option<&BTreeMap<String, McpServerRequirement>>,
) {
let Some(allowlist) = mcp_requirements else {
return;
};

for (name, server) in mcp_servers.iter_mut() {
let allowed = allowlist
.get(name)
.is_some_and(|requirement| mcp_server_matches_requirement(requirement, server));
if !allowed {
server.enabled = false;
}
}
}

fn constrain_mcp_servers(
mcp_servers: HashMap<String, McpServerConfig>,
mcp_requirements: Option<&BTreeMap<String, McpServerRequirement>>,
) -> ConstraintResult<Constrained<HashMap<String, McpServerConfig>>> {
if mcp_requirements.is_none() {
return Ok(Constrained::allow_any(mcp_servers));
}

let mcp_requirements = mcp_requirements.cloned();
Constrained::normalized(mcp_servers, move |mut servers| {
filter_mcp_servers_by_requirements(&mut servers, mcp_requirements.as_ref());
servers
})
}

fn mcp_server_matches_requirement(
requirement: &McpServerRequirement,
server: &McpServerConfig,
) -> bool {
match &requirement.identity {
McpServerIdentity::Command {
command: want_command,
} => matches!(
&server.transport,
McpServerTransportConfig::Stdio { command: got_command, .. }
if got_command == want_command
),
McpServerIdentity::Url { url: want_url } => matches!(
&server.transport,
McpServerTransportConfig::StreamableHttp { url: got_url, .. }
if got_url == want_url
),
}
}

pub async fn load_global_mcp_servers(
codex_home: &Path,
) -> std::io::Result<BTreeMap<String, McpServerConfig>> {
@@ -643,14 +707,14 @@ pub fn set_project_trust_level(
pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::Result<()> {
// Validate that the provider is one of the known OSS providers
match provider {
LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => {
LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
// Valid provider, continue
}
_ => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}"
"Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID}"
),
));
}
@@ -682,7 +746,8 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R
}

/// Base config deserialized from ~/.codex/config.toml.
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
@@ -741,6 +806,8 @@ pub struct ConfigToml {

/// Definition for MCP servers that Codex can reach out to for tool calls.
#[serde(default)]
// Uses the raw MCP input shape (custom deserialization) rather than `McpServerConfig`.
#[schemars(schema_with = "crate::config::schema::mcp_servers_schema")]
pub mcp_servers: HashMap<String, McpServerConfig>,

/// Preferred backend for storing MCP OAuth credentials.
@@ -751,6 +818,10 @@ pub struct ConfigToml {
#[serde(default)]
pub mcp_oauth_credentials_store: Option<OAuthCredentialsStoreMode>,

/// Optional fixed port for the local HTTP callback server used during MCP OAuth login.
/// When unset, Codex will bind to an ephemeral port chosen by the OS.
pub mcp_oauth_callback_port: Option<u16>,

/// User-defined provider entries that extend/override the built-in list.
#[serde(default)]
pub model_providers: HashMap<String, ModelProviderInfo>,
@@ -808,6 +879,8 @@ pub struct ConfigToml {

/// Centralized feature flags (new). Prefer this over individual toggles.
#[serde(default)]
// Injects known feature keys into the schema and forbids unknown keys.
#[schemars(schema_with = "crate::config::schema::features_schema")]
pub features: Option<FeaturesToml>,

/// Settings for ghost snapshots (used for undo).
@@ -852,7 +925,7 @@ pub struct ConfigToml {
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
/// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama".
/// Preferred OSS provider for local models, e.g. "lmstudio", "ollama", or "ollama-chat".
pub oss_provider: Option<String>,
}

@@ -881,7 +954,8 @@ impl From<ConfigToml> for UserSavedConfig {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ProjectConfig {
pub trust_level: Option<TrustLevel>,
}
@@ -896,7 +970,8 @@ impl ProjectConfig {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,
@@ -915,7 +990,8 @@ impl From<ToolsToml> for Tools {
}
}

#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct GhostSnapshotToml {
/// Exclude untracked files larger than this many bytes from ghost snapshots.
#[serde(alias = "ignore_untracked_files_over_bytes")]
@@ -1327,6 +1403,7 @@ impl Config {
let ConfigRequirements {
approval_policy: mut constrained_approval_policy,
sandbox_policy: mut constrained_sandbox_policy,
mcp_server_requirements,
} = requirements;

constrained_approval_policy
@@ -1336,6 +1413,12 @@ impl Config {
.set(sandbox_policy)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}")))?;

let mcp_servers =
constrain_mcp_servers(cfg.mcp_servers.clone(), mcp_server_requirements.as_ref())
.map_err(|e| {
std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("{e}"))
})?;

let config = Self {
model,
review_model,
@@ -1357,10 +1440,11 @@ impl Config {
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
cli_auth_credentials_store_mode: cfg.cli_auth_credentials_store.unwrap_or_default(),
mcp_servers: cfg.mcp_servers,
mcp_servers,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
// is important in code to differentiate the mode from the store implementation.
mcp_oauth_credentials_store_mode: cfg.mcp_oauth_credentials_store.unwrap_or_default(),
mcp_oauth_callback_port: cfg.mcp_oauth_callback_port,
model_providers,
project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES),
project_doc_fallback_filenames: cfg
@@ -1595,9 +1679,44 @@ mod tests {
use core_test_support::test_absolute_path;
use pretty_assertions::assert_eq;

use std::collections::BTreeMap;
use std::collections::HashMap;
use std::time::Duration;
use tempfile::TempDir;

fn stdio_mcp(command: &str) -> McpServerConfig {
McpServerConfig {
transport: McpServerTransportConfig::Stdio {
command: command.to_string(),
args: Vec::new(),
env: None,
env_vars: Vec::new(),
cwd: None,
},
enabled: true,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
}
}

fn http_mcp(url: &str) -> McpServerConfig {
McpServerConfig {
transport: McpServerTransportConfig::StreamableHttp {
url: url.to_string(),
bearer_token_env_var: None,
http_headers: None,
env_http_headers: None,
},
enabled: true,
startup_timeout_sec: None,
tool_timeout_sec: None,
enabled_tools: None,
disabled_tools: None,
}
}

#[test]
fn test_toml_parsing() {
let history_with_persistence = r#"
@@ -1802,6 +1921,122 @@ trust_level = "trusted"
}
}

#[test]
fn filter_mcp_servers_by_allowlist_enforces_identity_rules() {
const MISMATCHED_COMMAND_SERVER: &str = "mismatched-command-should-disable";
const MISMATCHED_URL_SERVER: &str = "mismatched-url-should-disable";
const MATCHED_COMMAND_SERVER: &str = "matched-command-should-allow";
const MATCHED_URL_SERVER: &str = "matched-url-should-allow";
const DIFFERENT_NAME_SERVER: &str = "different-name-should-disable";

const GOOD_CMD: &str = "good-cmd";
const GOOD_URL: &str = "https://example.com/good";

let mut servers = HashMap::from([
(MISMATCHED_COMMAND_SERVER.to_string(), stdio_mcp("docs-cmd")),
(
MISMATCHED_URL_SERVER.to_string(),
http_mcp("https://example.com/mcp"),
),
(MATCHED_COMMAND_SERVER.to_string(), stdio_mcp(GOOD_CMD)),
(MATCHED_URL_SERVER.to_string(), http_mcp(GOOD_URL)),
(DIFFERENT_NAME_SERVER.to_string(), stdio_mcp("same-cmd")),
]);
filter_mcp_servers_by_requirements(
&mut servers,
Some(&BTreeMap::from([
(
MISMATCHED_URL_SERVER.to_string(),
McpServerRequirement {
identity: McpServerIdentity::Url {
url: "https://example.com/other".to_string(),
},
},
),
(
MISMATCHED_COMMAND_SERVER.to_string(),
McpServerRequirement {
identity: McpServerIdentity::Command {
command: "other-cmd".to_string(),
},
},
),
(
MATCHED_URL_SERVER.to_string(),
McpServerRequirement {
identity: McpServerIdentity::Url {
url: GOOD_URL.to_string(),
},
},
),
(
MATCHED_COMMAND_SERVER.to_string(),
McpServerRequirement {
identity: McpServerIdentity::Command {
command: GOOD_CMD.to_string(),
},
},
),
])),
);

assert_eq!(
servers
.iter()
.map(|(name, server)| (name.clone(), server.enabled))
.collect::<HashMap<String, bool>>(),
HashMap::from([
(MISMATCHED_URL_SERVER.to_string(), false),
(MISMATCHED_COMMAND_SERVER.to_string(), false),
(MATCHED_URL_SERVER.to_string(), true),
(MATCHED_COMMAND_SERVER.to_string(), true),
(DIFFERENT_NAME_SERVER.to_string(), false),
])
);
}

#[test]
fn filter_mcp_servers_by_allowlist_allows_all_when_unset() {
let mut servers = HashMap::from([
("server-a".to_string(), stdio_mcp("cmd-a")),
("server-b".to_string(), http_mcp("https://example.com/b")),
]);

filter_mcp_servers_by_requirements(&mut servers, None);

assert_eq!(
servers
.iter()
.map(|(name, server)| (name.clone(), server.enabled))
.collect::<HashMap<String, bool>>(),
HashMap::from([
("server-a".to_string(), true),
("server-b".to_string(), true),
])
);
}

#[test]
fn filter_mcp_servers_by_allowlist_blocks_all_when_empty() {
let mut servers = HashMap::from([
("server-a".to_string(), stdio_mcp("cmd-a")),
("server-b".to_string(), http_mcp("https://example.com/b")),
]);

filter_mcp_servers_by_requirements(&mut servers, Some(&BTreeMap::new()));

assert_eq!(
servers
.iter()
.map(|(name, server)| (name.clone(), server.enabled))
.collect::<HashMap<String, bool>>(),
HashMap::from([
("server-a".to_string(), false),
("server-b".to_string(), false),
])
);
}

#[test]
fn add_dir_override_extends_workspace_writable_roots() -> std::io::Result<()> {
let temp_dir = TempDir::new()?;
@@ -3243,8 +3478,9 @@ model_verbosity = "high"
notify: None,
cwd: fixture.cwd(),
cli_auth_credentials_store_mode: Default::default(),
mcp_servers: HashMap::new(),
mcp_servers: Constrained::allow_any(HashMap::new()),
mcp_oauth_credentials_store_mode: Default::default(),
mcp_oauth_callback_port: None,
model_providers: fixture.model_provider_map.clone(),
project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
project_doc_fallback_filenames: Vec::new(),
@@ -3329,8 +3565,9 @@ model_verbosity = "high"
notify: None,
cwd: fixture.cwd(),
cli_auth_credentials_store_mode: Default::default(),
mcp_servers: HashMap::new(),
mcp_servers: Constrained::allow_any(HashMap::new()),
mcp_oauth_credentials_store_mode: Default::default(),
mcp_oauth_callback_port: None,
model_providers: fixture.model_provider_map.clone(),
project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
project_doc_fallback_filenames: Vec::new(),
@@ -3430,8 +3667,9 @@ model_verbosity = "high"
notify: None,
cwd: fixture.cwd(),
cli_auth_credentials_store_mode: Default::default(),
mcp_servers: HashMap::new(),
mcp_servers: Constrained::allow_any(HashMap::new()),
mcp_oauth_credentials_store_mode: Default::default(),
mcp_oauth_callback_port: None,
model_providers: fixture.model_provider_map.clone(),
project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
project_doc_fallback_filenames: Vec::new(),
@@ -3517,8 +3755,9 @@ model_verbosity = "high"
notify: None,
cwd: fixture.cwd(),
cli_auth_credentials_store_mode: Default::default(),
mcp_servers: HashMap::new(),
mcp_servers: Constrained::allow_any(HashMap::new()),
mcp_oauth_credentials_store_mode: Default::default(),
mcp_oauth_callback_port: None,
model_providers: fixture.model_provider_map.clone(),
project_doc_max_bytes: PROJECT_DOC_MAX_BYTES,
project_doc_fallback_filenames: Vec::new(),
@@ -3832,6 +4071,34 @@ trust_level = "untrusted"
assert_eq!(result, Some("explicit-provider".to_string()));
}

#[test]
fn config_toml_deserializes_mcp_oauth_callback_port() {
let toml = r#"mcp_oauth_callback_port = 4321"#;
let cfg: ConfigToml =
toml::from_str(toml).expect("TOML deserialization should succeed for callback port");
assert_eq!(cfg.mcp_oauth_callback_port, Some(4321));
}

#[test]
fn config_loads_mcp_oauth_callback_port_from_toml() -> std::io::Result<()> {
let codex_home = TempDir::new()?;
let toml = r#"
model = "gpt-5.1"
mcp_oauth_callback_port = 5678
"#;
let cfg: ConfigToml =
toml::from_str(toml).expect("TOML deserialization should succeed for callback port");

let config = Config::load_from_base_config_with_overrides(
cfg,
ConfigOverrides::default(),
codex_home.path().to_path_buf(),
)?;

assert_eq!(config.mcp_oauth_callback_port, Some(5678));
Ok(())
}

#[test]
fn test_untrusted_project_gets_unless_trusted_approval_policy() -> anyhow::Result<()> {
let codex_home = TempDir::new()?;

@@ -1,4 +1,5 @@
use codex_utils_absolute_path::AbsolutePathBuf;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;

@@ -10,7 +11,8 @@ use codex_protocol::openai_models::ReasoningEffort;

/// Collection of common configuration options that a user can define as a unit
/// in `config.toml`.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ConfigProfile {
pub model: Option<String>,
/// The key in the `model_providers` map identifying the
@@ -32,6 +34,8 @@ pub struct ConfigProfile {
pub analytics: Option<crate::config::types::AnalyticsConfigToml>,
/// Optional feature toggles scoped to this profile.
#[serde(default)]
// Injects known feature keys into the schema and forbids unknown keys.
#[schemars(schema_with = "crate::config::schema::features_schema")]
pub features: Option<crate::features::FeaturesToml>,
pub oss_provider: Option<String>,
}

codex-rs/core/src/config/schema.md (new file, 11 lines)
@@ -0,0 +1,11 @@
# Config JSON Schema

We generate a JSON Schema for `~/.codex/config.toml` from the `ConfigToml` type
and commit it at `codex-rs/core/config.schema.json` for editor integration.

When you change any fields included in `ConfigToml` (or nested config types),
regenerate the schema:

```
just write-config-schema
```
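The `just` recipe ultimately delegates to the `write_config_schema` helper added in
`schema.rs` below. A minimal sketch of regenerating the fixture programmatically;
the `codex_core` crate path and the hard-coded output path are illustrative
assumptions, not part of this change:

```rust
use std::path::Path;

fn main() -> anyhow::Result<()> {
    // Rebuild the committed JSON Schema for ~/.codex/config.toml. The module
    // path below assumes the core crate exposes config::schema publicly.
    codex_core::config::schema::write_config_schema(Path::new(
        "codex-rs/core/config.schema.json",
    ))
}
```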
codex-rs/core/src/config/schema.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
use crate::config::ConfigToml;
|
||||
use crate::config::types::RawMcpServerConfig;
|
||||
use crate::features::FEATURES;
|
||||
use schemars::r#gen::SchemaGenerator;
|
||||
use schemars::r#gen::SchemaSettings;
|
||||
use schemars::schema::InstanceType;
|
||||
use schemars::schema::ObjectValidation;
|
||||
use schemars::schema::RootSchema;
|
||||
use schemars::schema::Schema;
|
||||
use schemars::schema::SchemaObject;
|
||||
use std::path::Path;
|
||||
|
||||
/// Schema for the `[features]` map with known + legacy keys only.
|
||||
pub(crate) fn features_schema(schema_gen: &mut SchemaGenerator) -> Schema {
|
||||
let mut object = SchemaObject {
|
||||
instance_type: Some(InstanceType::Object.into()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut validation = ObjectValidation::default();
|
||||
for feature in FEATURES {
|
||||
validation
|
||||
.properties
|
||||
.insert(feature.key.to_string(), schema_gen.subschema_for::<bool>());
|
||||
}
|
||||
for legacy_key in crate::features::legacy_feature_keys() {
|
||||
validation
|
||||
.properties
|
||||
.insert(legacy_key.to_string(), schema_gen.subschema_for::<bool>());
|
||||
}
|
||||
    validation.additional_properties = Some(Box::new(Schema::Bool(false)));
    object.object = Some(Box::new(validation));

    Schema::Object(object)
}

/// Schema for the `[mcp_servers]` map using the raw input shape.
pub(crate) fn mcp_servers_schema(schema_gen: &mut SchemaGenerator) -> Schema {
    let mut object = SchemaObject {
        instance_type: Some(InstanceType::Object.into()),
        ..Default::default()
    };

    let validation = ObjectValidation {
        additional_properties: Some(Box::new(schema_gen.subschema_for::<RawMcpServerConfig>())),
        ..Default::default()
    };
    object.object = Some(Box::new(validation));

    Schema::Object(object)
}

/// Build the config schema for `config.toml`.
pub fn config_schema() -> RootSchema {
    SchemaSettings::draft07()
        .with(|settings| {
            settings.option_add_null_type = false;
        })
        .into_generator()
        .into_root_schema_for::<ConfigToml>()
}

/// Render the config schema as pretty-printed JSON.
pub fn config_schema_json() -> anyhow::Result<Vec<u8>> {
    let schema = config_schema();
    let json = serde_json::to_vec_pretty(&schema)?;
    Ok(json)
}

/// Write the config schema fixture to disk.
pub fn write_config_schema(out_path: &Path) -> anyhow::Result<()> {
    let json = config_schema_json()?;
    std::fs::write(out_path, json)?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::config_schema_json;
    use serde_json::Map;
    use serde_json::Value;
    use similar::TextDiff;

    fn canonicalize(value: &Value) -> Value {
        match value {
            Value::Array(items) => Value::Array(items.iter().map(canonicalize).collect()),
            Value::Object(map) => {
                let mut entries: Vec<_> = map.iter().collect();
                entries.sort_by(|(left, _), (right, _)| left.cmp(right));
                let mut sorted = Map::with_capacity(map.len());
                for (key, child) in entries {
                    sorted.insert(key.clone(), canonicalize(child));
                }
                Value::Object(sorted)
            }
            _ => value.clone(),
        }
    }

    #[test]
    fn config_schema_matches_fixture() {
        let fixture_path = codex_utils_cargo_bin::find_resource!("config.schema.json")
            .expect("resolve config schema fixture path");
        let fixture = std::fs::read_to_string(fixture_path).expect("read config schema fixture");
        let fixture_value: serde_json::Value =
            serde_json::from_str(&fixture).expect("parse config schema fixture");
        let schema_json = config_schema_json().expect("serialize config schema");
        let schema_value: serde_json::Value =
            serde_json::from_slice(&schema_json).expect("decode schema json");
        let fixture_value = canonicalize(&fixture_value);
        let schema_value = canonicalize(&schema_value);
        if fixture_value != schema_value {
            let expected =
                serde_json::to_string_pretty(&fixture_value).expect("serialize fixture json");
            let actual =
                serde_json::to_string_pretty(&schema_value).expect("serialize schema json");
            let diff = TextDiff::from_lines(&expected, &actual)
                .unified_diff()
                .header("fixture", "generated")
                .to_string();
            panic!(
                "Current schema for `config.toml` doesn't match the fixture. \
                 Run `just write-config-schema` to overwrite with your changes.\n\n{diff}"
            );
        }
    }
}
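
// A minimal sketch (not the repository's actual wiring) of how a
// `write-config-schema` task could drive the helper above. The fixture path
// is taken from the fixture name the test resolves; the function name here is
// hypothetical.
fn regenerate_schema_fixture() -> anyhow::Result<()> {
    // Rewrites the fixture that `config_schema_matches_fixture` diffs against.
    write_config_schema(std::path::Path::new("codex-rs/core/config.schema.json"))
}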

@@ -11,6 +11,7 @@ use std::path::PathBuf;
use std::time::Duration;
use wildmatch::WildMatchPattern;

use schemars::JsonSchema;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
@@ -48,47 +49,51 @@ pub struct McpServerConfig {
    pub disabled_tools: Option<Vec<String>>,
}

// Raw MCP config shape used for deserialization and JSON Schema generation.
// Keep this in sync with the validation logic in `McpServerConfig`.
#[derive(Deserialize, Clone, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub(crate) struct RawMcpServerConfig {
    // stdio
    pub command: Option<String>,
    #[serde(default)]
    pub args: Option<Vec<String>>,
    #[serde(default)]
    pub env: Option<HashMap<String, String>>,
    #[serde(default)]
    pub env_vars: Option<Vec<String>>,
    #[serde(default)]
    pub cwd: Option<PathBuf>,
    pub http_headers: Option<HashMap<String, String>>,
    #[serde(default)]
    pub env_http_headers: Option<HashMap<String, String>>,

    // streamable_http
    pub url: Option<String>,
    pub bearer_token: Option<String>,
    pub bearer_token_env_var: Option<String>,

    // shared
    #[serde(default)]
    pub startup_timeout_sec: Option<f64>,
    #[serde(default)]
    pub startup_timeout_ms: Option<u64>,
    #[serde(default, with = "option_duration_secs")]
    #[schemars(with = "Option<f64>")]
    pub tool_timeout_sec: Option<Duration>,
    #[serde(default)]
    pub enabled: Option<bool>,
    #[serde(default)]
    pub enabled_tools: Option<Vec<String>>,
    #[serde(default)]
    pub disabled_tools: Option<Vec<String>>,
}

impl<'de> Deserialize<'de> for McpServerConfig {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize, Clone)]
        struct RawMcpServerConfig {
            // stdio
            command: Option<String>,
            #[serde(default)]
            args: Option<Vec<String>>,
            #[serde(default)]
            env: Option<HashMap<String, String>>,
            #[serde(default)]
            env_vars: Option<Vec<String>>,
            #[serde(default)]
            cwd: Option<PathBuf>,
            http_headers: Option<HashMap<String, String>>,
            #[serde(default)]
            env_http_headers: Option<HashMap<String, String>>,

            // streamable_http
            url: Option<String>,
            bearer_token: Option<String>,
            bearer_token_env_var: Option<String>,

            // shared
            #[serde(default)]
            startup_timeout_sec: Option<f64>,
            #[serde(default)]
            startup_timeout_ms: Option<u64>,
            #[serde(default, with = "option_duration_secs")]
            tool_timeout_sec: Option<Duration>,
            #[serde(default)]
            enabled: Option<bool>,
            #[serde(default)]
            enabled_tools: Option<Vec<String>>,
            #[serde(default)]
            disabled_tools: Option<Vec<String>>,
        }

        let mut raw = RawMcpServerConfig::deserialize(deserializer)?;

        let startup_timeout_sec = match (raw.startup_timeout_sec, raw.startup_timeout_ms) {
@@ -164,7 +169,7 @@ const fn default_enabled() -> bool {
    true
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema)]
#[serde(untagged, deny_unknown_fields, rename_all = "snake_case")]
pub enum McpServerTransportConfig {
    /// https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#stdio
@@ -222,7 +227,7 @@ mod option_duration_secs {
    }
}

#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, JsonSchema)]
pub enum UriBasedFileOpener {
    #[serde(rename = "vscode")]
    VsCode,
@@ -254,7 +259,8 @@ impl UriBasedFileOpener {
}

/// Settings that govern if and what will be written to `~/.codex/history.jsonl`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct History {
    /// If true, history entries will not be written to disk.
    pub persistence: HistoryPersistence,
@@ -264,7 +270,7 @@ pub struct History {
    pub max_bytes: Option<usize>,
}

#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub enum HistoryPersistence {
    /// Save all history entries to disk.
@@ -277,13 +283,15 @@ pub enum HistoryPersistence {
// ===== Analytics configuration =====

/// Analytics settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct AnalyticsConfigToml {
    /// When `false`, disables analytics across Codex product surfaces in this profile.
    pub enabled: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct FeedbackConfigToml {
    /// When `false`, disables the feedback flow across Codex product surfaces.
    pub enabled: Option<bool>,
@@ -291,7 +299,7 @@ pub struct FeedbackConfigToml {

// ===== OTEL configuration =====

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub enum OtelHttpProtocol {
    /// Binary payload
@@ -300,7 +308,8 @@ pub enum OtelHttpProtocol {
    Json,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
#[serde(rename_all = "kebab-case")]
pub struct OtelTlsConfig {
    pub ca_certificate: Option<AbsolutePathBuf>,
@@ -309,7 +318,8 @@ pub struct OtelTlsConfig {
}

/// Which OTEL exporter to use.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema)]
#[schemars(deny_unknown_fields)]
#[serde(rename_all = "kebab-case")]
pub enum OtelExporterKind {
    None,
@@ -332,7 +342,8 @@ pub enum OtelExporterKind {
}

/// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct OtelConfigToml {
    /// Log user prompt in traces
    pub log_user_prompt: Option<bool>,
@@ -369,7 +380,7 @@ impl Default for OtelConfig {
    }
}

#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)]
#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum Notifications {
    Enabled(bool),
@@ -387,7 +398,7 @@ impl Default for Notifications {
/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse
/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to
/// infer wheel vs trackpad per stream, or forces a specific behavior.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ScrollInputMode {
    /// Infer wheel vs trackpad behavior per scroll stream.
@@ -405,7 +416,8 @@ impl Default for ScrollInputMode {
}

/// Collection of settings that are specific to the TUI.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct Tui {
    /// Enable desktop notifications from the TUI when the terminal is unfocused.
    /// Defaults to `true`.
@@ -544,7 +556,8 @@ const fn default_true() -> bool {
/// Settings for notices we display to users via the tui and app-server clients
/// (primarily the Codex IDE extension). NOTE: these are different from
/// notifications - notices are warnings, NUX screens, acknowledgements, etc.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct Notice {
    /// Tracks whether the user has acknowledged the full access warning prompt.
    pub hide_full_access_warning: Option<bool>,
@@ -567,7 +580,8 @@ impl Notice {
    pub(crate) const TABLE_KEY: &'static str = "notice";
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct SandboxWorkspaceWrite {
    #[serde(default)]
    pub writable_roots: Vec<AbsolutePathBuf>,
@@ -590,7 +604,7 @@ impl From<SandboxWorkspaceWrite> for codex_app_server_protocol::SandboxSettings
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub enum ShellEnvironmentPolicyInherit {
    /// "Core" environment variables for the platform. On UNIX, this would
@@ -607,7 +621,8 @@ pub enum ShellEnvironmentPolicyInherit {

/// Policy for building the `env` when spawning a process via either the
/// `shell` or `local_shell` tool.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ShellEnvironmentPolicyToml {
    pub inherit: Option<ShellEnvironmentPolicyInherit>,

@@ -3,6 +3,7 @@ use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::fmt;

use crate::config::Constrained;
@@ -43,6 +44,7 @@ impl fmt::Display for RequirementSource {
pub struct ConfigRequirements {
    pub approval_policy: Constrained<AskForApproval>,
    pub sandbox_policy: Constrained<SandboxPolicy>,
    pub mcp_server_requirements: Option<BTreeMap<String, McpServerRequirement>>,
}

impl Default for ConfigRequirements {
@@ -50,15 +52,29 @@ impl Default for ConfigRequirements {
        Self {
            approval_policy: Constrained::allow_any_from_default(),
            sandbox_policy: Constrained::allow_any(SandboxPolicy::ReadOnly),
            mcp_server_requirements: None,
        }
    }
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum McpServerIdentity {
    Command { command: String },
    Url { url: String },
}

#[derive(Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct McpServerRequirement {
    pub identity: McpServerIdentity,
}

/// Base config deserialized from /etc/codex/requirements.toml or MDM.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
pub struct ConfigRequirementsToml {
    pub allowed_approval_policies: Option<Vec<AskForApproval>>,
    pub allowed_sandbox_modes: Option<Vec<SandboxModeRequirement>>,
    pub mcp_server_requirements: Option<BTreeMap<String, McpServerRequirement>>,
}

/// Value paired with the requirement source it came from, for better error
@@ -87,6 +103,7 @@ impl<T> std::ops::Deref for Sourced<T> {
pub struct ConfigRequirementsWithSources {
    pub allowed_approval_policies: Option<Sourced<Vec<AskForApproval>>>,
    pub allowed_sandbox_modes: Option<Sourced<Vec<SandboxModeRequirement>>>,
    pub mcp_server_requirements: Option<Sourced<BTreeMap<String, McpServerRequirement>>>,
}

impl ConfigRequirementsWithSources {
@@ -114,7 +131,11 @@ impl ConfigRequirementsWithSources {
            self,
            other,
            source,
            { allowed_approval_policies, allowed_sandbox_modes }
            {
                allowed_approval_policies,
                allowed_sandbox_modes,
                mcp_server_requirements,
            }
        );
    }

@@ -122,10 +143,12 @@ impl ConfigRequirementsWithSources {
        let ConfigRequirementsWithSources {
            allowed_approval_policies,
            allowed_sandbox_modes,
            mcp_server_requirements,
        } = self;
        ConfigRequirementsToml {
            allowed_approval_policies: allowed_approval_policies.map(|sourced| sourced.value),
            allowed_sandbox_modes: allowed_sandbox_modes.map(|sourced| sourced.value),
            mcp_server_requirements: mcp_server_requirements.map(|sourced| sourced.value),
        }
    }
}
@@ -159,7 +182,9 @@ impl From<SandboxMode> for SandboxModeRequirement {

impl ConfigRequirementsToml {
    pub fn is_empty(&self) -> bool {
        self.allowed_approval_policies.is_none() && self.allowed_sandbox_modes.is_none()
        self.allowed_approval_policies.is_none()
            && self.allowed_sandbox_modes.is_none()
            && self.mcp_server_requirements.is_none()
    }
}

@@ -170,6 +195,7 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
        let ConfigRequirementsWithSources {
            allowed_approval_policies,
            allowed_sandbox_modes,
            mcp_server_requirements,
        } = toml;

        let approval_policy: Constrained<AskForApproval> = match allowed_approval_policies {
@@ -247,6 +273,7 @@ impl TryFrom<ConfigRequirementsWithSources> for ConfigRequirements {
        Ok(ConfigRequirements {
            approval_policy,
            sandbox_policy,
            mcp_server_requirements: mcp_server_requirements.map(|sourced| sourced.value),
        })
    }
}
@@ -264,12 +291,15 @@ mod tests {
        let ConfigRequirementsToml {
            allowed_approval_policies,
            allowed_sandbox_modes,
            mcp_server_requirements,
        } = toml;
        ConfigRequirementsWithSources {
            allowed_approval_policies: allowed_approval_policies
                .map(|value| Sourced::new(value, RequirementSource::Unknown)),
            allowed_sandbox_modes: allowed_sandbox_modes
                .map(|value| Sourced::new(value, RequirementSource::Unknown)),
            mcp_server_requirements: mcp_server_requirements
                .map(|value| Sourced::new(value, RequirementSource::Unknown)),
        }
    }

@@ -289,6 +319,7 @@ mod tests {
        let other = ConfigRequirementsToml {
            allowed_approval_policies: Some(allowed_approval_policies.clone()),
            allowed_sandbox_modes: Some(allowed_sandbox_modes.clone()),
            mcp_server_requirements: None,
        };

        target.merge_unset_fields(source.clone(), other);
@@ -301,6 +332,7 @@ mod tests {
                    source.clone()
                )),
                allowed_sandbox_modes: Some(Sourced::new(allowed_sandbox_modes, source)),
                mcp_server_requirements: None,
            }
        );
    }
@@ -328,6 +360,7 @@ mod tests {
                    source_location,
                )),
                allowed_sandbox_modes: None,
                mcp_server_requirements: None,
            }
        );
        Ok(())
@@ -363,6 +396,7 @@ mod tests {
                    existing_source,
                )),
                allowed_sandbox_modes: None,
                mcp_server_requirements: None,
            }
        );
        Ok(())
@@ -523,4 +557,40 @@ mod tests {

        Ok(())
    }

    #[test]
    fn deserialize_mcp_server_requirements() -> Result<()> {
        let toml_str = r#"
            [mcp_server_requirements.docs.identity]
            command = "codex-mcp"

            [mcp_server_requirements.remote.identity]
            url = "https://example.com/mcp"
        "#;
        let requirements: ConfigRequirements =
            with_unknown_source(from_str(toml_str)?).try_into()?;

        assert_eq!(
            requirements.mcp_server_requirements,
            Some(BTreeMap::from([
                (
                    "docs".to_string(),
                    McpServerRequirement {
                        identity: McpServerIdentity::Command {
                            command: "codex-mcp".to_string(),
                        },
                    },
                ),
                (
                    "remote".to_string(),
                    McpServerRequirement {
                        identity: McpServerIdentity::Url {
                            url: "https://example.com/mcp".to_string(),
                        },
                    },
                ),
            ]))
        );
        Ok(())
    }
}
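
// A minimal sketch of the end-to-end flow the new test exercises, reusing the
// test module's own helpers (`from_str`, `with_unknown_source`); the TOML body
// here is illustrative, not a shipped fixture.
fn requirements_from_toml_sketch() -> Result<()> {
    let toml_str = r#"
        [mcp_server_requirements.docs.identity]
        command = "codex-mcp"
    "#;
    // Parse the raw TOML, attach RequirementSource::Unknown, then validate
    // into the strongly-typed ConfigRequirements.
    let requirements: ConfigRequirements =
        with_unknown_source(from_str(toml_str)?).try_into()?;
    assert!(requirements.mcp_server_requirements.is_some());
    Ok(())
}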

@@ -26,6 +26,8 @@ use toml::Value as TomlValue;

pub use config_requirements::ConfigRequirements;
pub use config_requirements::ConfigRequirementsToml;
pub use config_requirements::McpServerIdentity;
pub use config_requirements::McpServerRequirement;
pub use config_requirements::RequirementSource;
pub use config_requirements::SandboxModeRequirement;
pub use merge::merge_toml_values;

@@ -1,14 +1,9 @@
use crate::codex::TurnContext;
use crate::protocol::AskForApproval;
use crate::protocol::NetworkAccess;
use crate::protocol::SandboxPolicy;
use crate::shell::Shell;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::ENVIRONMENT_CONTEXT_CLOSE_TAG;
use codex_protocol::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
use codex_utils_absolute_path::AbsolutePathBuf;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;
@@ -17,55 +12,12 @@ use std::path::PathBuf;
#[serde(rename = "environment_context", rename_all = "snake_case")]
pub(crate) struct EnvironmentContext {
    pub cwd: Option<PathBuf>,
    pub approval_policy: Option<AskForApproval>,
    pub sandbox_mode: Option<SandboxMode>,
    pub network_access: Option<NetworkAccess>,
    pub writable_roots: Option<Vec<AbsolutePathBuf>>,
    pub shell: Shell,
}

impl EnvironmentContext {
    pub fn new(
        cwd: Option<PathBuf>,
        approval_policy: Option<AskForApproval>,
        sandbox_policy: Option<SandboxPolicy>,
        shell: Shell,
    ) -> Self {
        Self {
            cwd,
            approval_policy,
            sandbox_mode: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(SandboxMode::DangerFullAccess),
                Some(SandboxPolicy::ReadOnly) => Some(SandboxMode::ReadOnly),
                Some(SandboxPolicy::ExternalSandbox { .. }) => Some(SandboxMode::DangerFullAccess),
                Some(SandboxPolicy::WorkspaceWrite { .. }) => Some(SandboxMode::WorkspaceWrite),
                None => None,
            },
            network_access: match sandbox_policy {
                Some(SandboxPolicy::DangerFullAccess) => Some(NetworkAccess::Enabled),
                Some(SandboxPolicy::ReadOnly) => Some(NetworkAccess::Restricted),
                Some(SandboxPolicy::ExternalSandbox { network_access }) => Some(network_access),
                Some(SandboxPolicy::WorkspaceWrite { network_access, .. }) => {
                    if network_access {
                        Some(NetworkAccess::Enabled)
                    } else {
                        Some(NetworkAccess::Restricted)
                    }
                }
                None => None,
            },
            writable_roots: match sandbox_policy {
                Some(SandboxPolicy::WorkspaceWrite { writable_roots, .. }) => {
                    if writable_roots.is_empty() {
                        None
                    } else {
                        Some(writable_roots)
                    }
                }
                _ => None,
            },
            shell,
        }
    pub fn new(cwd: Option<PathBuf>, shell: Shell) -> Self {
        Self { cwd, shell }
    }

    /// Compares two environment contexts, ignoring the shell. Useful when
@@ -74,19 +26,11 @@ impl EnvironmentContext {
    pub fn equals_except_shell(&self, other: &EnvironmentContext) -> bool {
        let EnvironmentContext {
            cwd,
            approval_policy,
            sandbox_mode,
            network_access,
            writable_roots,
            // should compare all fields except shell
            shell: _,
        } = other;

        self.cwd == *cwd
            && self.approval_policy == *approval_policy
            && self.sandbox_mode == *sandbox_mode
            && self.network_access == *network_access
            && self.writable_roots == *writable_roots
    }

    pub fn diff(before: &TurnContext, after: &TurnContext, shell: &Shell) -> Self {
@@ -95,26 +39,11 @@ impl EnvironmentContext {
        } else {
            None
        };
        let approval_policy = if before.approval_policy != after.approval_policy {
            Some(after.approval_policy)
        } else {
            None
        };
        let sandbox_policy = if before.sandbox_policy != after.sandbox_policy {
            Some(after.sandbox_policy.clone())
        } else {
            None
        };
        EnvironmentContext::new(cwd, approval_policy, sandbox_policy, shell.clone())
        EnvironmentContext::new(cwd, shell.clone())
    }

    pub fn from_turn_context(turn_context: &TurnContext, shell: &Shell) -> Self {
        Self::new(
            Some(turn_context.cwd.clone()),
            Some(turn_context.approval_policy),
            Some(turn_context.sandbox_policy.clone()),
            shell.clone(),
        )
        Self::new(Some(turn_context.cwd.clone()), shell.clone())
    }
}

@@ -126,10 +55,6 @@ impl EnvironmentContext {
/// ```xml
/// <environment_context>
///   <cwd>...</cwd>
///   <approval_policy>...</approval_policy>
///   <sandbox_mode>...</sandbox_mode>
///   <writable_roots>...</writable_roots>
///   <network_access>...</network_access>
///   <shell>...</shell>
/// </environment_context>
/// ```
@@ -138,29 +63,6 @@ impl EnvironmentContext {
        if let Some(cwd) = self.cwd {
            lines.push(format!("  <cwd>{}</cwd>", cwd.to_string_lossy()));
        }
        if let Some(approval_policy) = self.approval_policy {
            lines.push(format!(
                "  <approval_policy>{approval_policy}</approval_policy>"
            ));
        }
        if let Some(sandbox_mode) = self.sandbox_mode {
            lines.push(format!("  <sandbox_mode>{sandbox_mode}</sandbox_mode>"));
        }
        if let Some(network_access) = self.network_access {
            lines.push(format!(
                "  <network_access>{network_access}</network_access>"
            ));
        }
        if let Some(writable_roots) = self.writable_roots {
            lines.push("  <writable_roots>".to_string());
            for writable_root in writable_roots {
                lines.push(format!(
                    "    <root>{}</root>",
                    writable_root.to_string_lossy()
                ));
            }
            lines.push("  </writable_roots>".to_string());
        }

        let shell_name = self.shell.name();
        lines.push(format!("  <shell>{shell_name}</shell>"));
@@ -187,7 +89,6 @@ mod tests {

    use super::*;
    use core_test_support::test_path_buf;
    use core_test_support::test_tmp_path_buf;
    use pretty_assertions::assert_eq;

    fn fake_shell() -> Shell {
@@ -198,50 +99,17 @@ mod tests {
        }
    }

    fn workspace_write_policy(writable_roots: Vec<&str>, network_access: bool) -> SandboxPolicy {
        SandboxPolicy::WorkspaceWrite {
            writable_roots: writable_roots
                .into_iter()
                .map(|s| AbsolutePathBuf::try_from(s).unwrap())
                .collect(),
            network_access,
            exclude_tmpdir_env_var: false,
            exclude_slash_tmp: false,
        }
    }

    #[test]
    fn serialize_workspace_write_environment_context() {
        let cwd = test_path_buf("/repo");
        let writable_root = test_tmp_path_buf();
        let cwd_str = cwd.to_str().expect("cwd is valid utf-8");
        let writable_root_str = writable_root
            .to_str()
            .expect("writable root is valid utf-8");
        let context = EnvironmentContext::new(
            Some(cwd.clone()),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(
                vec![cwd_str, writable_root_str],
                false,
            )),
            fake_shell(),
        );
        let context = EnvironmentContext::new(Some(cwd.clone()), fake_shell());

        let expected = format!(
            r#"<environment_context>
  <cwd>{cwd}</cwd>
  <approval_policy>on-request</approval_policy>
  <sandbox_mode>workspace-write</sandbox_mode>
  <network_access>restricted</network_access>
  <writable_roots>
    <root>{cwd}</root>
    <root>{writable_root}</root>
  </writable_roots>
  <shell>bash</shell>
</environment_context>"#,
            cwd = cwd.display(),
            writable_root = writable_root.display(),
        );

        assert_eq!(context.serialize_to_xml(), expected);
@@ -249,17 +117,9 @@ mod tests {

    #[test]
    fn serialize_read_only_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::Never),
            Some(SandboxPolicy::ReadOnly),
            fake_shell(),
        );
        let context = EnvironmentContext::new(None, fake_shell());

        let expected = r#"<environment_context>
  <approval_policy>never</approval_policy>
  <sandbox_mode>read-only</sandbox_mode>
  <network_access>restricted</network_access>
  <shell>bash</shell>
</environment_context>"#;

@@ -268,19 +128,9 @@ mod tests {

    #[test]
    fn serialize_external_sandbox_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::ExternalSandbox {
                network_access: NetworkAccess::Enabled,
            }),
            fake_shell(),
        );
        let context = EnvironmentContext::new(None, fake_shell());

        let expected = r#"<environment_context>
  <approval_policy>on-request</approval_policy>
  <sandbox_mode>danger-full-access</sandbox_mode>
  <network_access>enabled</network_access>
  <shell>bash</shell>
</environment_context>"#;

@@ -289,19 +139,9 @@ mod tests {

    #[test]
    fn serialize_external_sandbox_with_restricted_network_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::ExternalSandbox {
                network_access: NetworkAccess::Restricted,
            }),
            fake_shell(),
        );
        let context = EnvironmentContext::new(None, fake_shell());

        let expected = r#"<environment_context>
  <approval_policy>on-request</approval_policy>
  <sandbox_mode>danger-full-access</sandbox_mode>
  <network_access>restricted</network_access>
  <shell>bash</shell>
</environment_context>"#;

@@ -310,17 +150,9 @@ mod tests {

    #[test]
    fn serialize_full_access_environment_context() {
        let context = EnvironmentContext::new(
            None,
            Some(AskForApproval::OnFailure),
            Some(SandboxPolicy::DangerFullAccess),
            fake_shell(),
        );
        let context = EnvironmentContext::new(None, fake_shell());

        let expected = r#"<environment_context>
  <approval_policy>on-failure</approval_policy>
  <sandbox_mode>danger-full-access</sandbox_mode>
  <network_access>enabled</network_access>
  <shell>bash</shell>
</environment_context>"#;

@@ -328,55 +160,24 @@ mod tests {
    }

    #[test]
    fn equals_except_shell_compares_approval_policy() {
        // Approval policy
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::Never),
            Some(workspace_write_policy(vec!["/repo"], true)),
            fake_shell(),
        );
        assert!(!context1.equals_except_shell(&context2));
    fn equals_except_shell_compares_cwd() {
        let context1 = EnvironmentContext::new(Some(PathBuf::from("/repo")), fake_shell());
        let context2 = EnvironmentContext::new(Some(PathBuf::from("/repo")), fake_shell());
        assert!(context1.equals_except_shell(&context2));
    }

    #[test]
    fn equals_except_shell_compares_sandbox_policy() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_read_only_policy()),
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(SandboxPolicy::new_workspace_write_policy()),
            fake_shell(),
        );
    fn equals_except_shell_ignores_sandbox_policy() {
        let context1 = EnvironmentContext::new(Some(PathBuf::from("/repo")), fake_shell());
        let context2 = EnvironmentContext::new(Some(PathBuf::from("/repo")), fake_shell());

        assert!(!context1.equals_except_shell(&context2));
        assert!(context1.equals_except_shell(&context2));
    }

    #[test]
    fn equals_except_shell_compares_workspace_write_policy() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp", "/var"], false)),
            fake_shell(),
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo", "/tmp"], true)),
            fake_shell(),
        );
    fn equals_except_shell_compares_cwd_differences() {
        let context1 = EnvironmentContext::new(Some(PathBuf::from("/repo1")), fake_shell());
        let context2 = EnvironmentContext::new(Some(PathBuf::from("/repo2")), fake_shell());

        assert!(!context1.equals_except_shell(&context2));
    }
@@ -385,8 +186,6 @@ mod tests {
    fn equals_except_shell_ignores_shell() {
        let context1 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Shell {
                shell_type: ShellType::Bash,
                shell_path: "/bin/bash".into(),
@@ -395,8 +194,6 @@ mod tests {
        );
        let context2 = EnvironmentContext::new(
            Some(PathBuf::from("/repo")),
            Some(AskForApproval::OnRequest),
            Some(workspace_write_policy(vec!["/repo"], false)),
            Shell {
                shell_type: ShellType::Zsh,
                shell_path: "/bin/zsh".into(),
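
// A minimal sketch of the slimmed-down context (illustrative, using the test
// helpers above): after this change the serialized payload only carries `cwd`
// and `shell`, e.g.
//   <environment_context>
//     <cwd>/repo</cwd>
//     <shell>bash</shell>
//   </environment_context>
// let context = EnvironmentContext::new(Some(PathBuf::from("/repo")), fake_shell());
// let _xml = context.serialize_to_xml();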

@@ -5,9 +5,10 @@ use std::sync::Arc;

use arc_swap::ArcSwap;

use crate::command_safety::is_dangerous_command::requires_initial_appoval;
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigLayerStackOrdering;
use crate::is_dangerous_command::command_might_be_dangerous;
use crate::is_safe_command::is_known_safe_command;
use codex_execpolicy::AmendError;
use codex_execpolicy::Decision;
use codex_execpolicy::Error as ExecPolicyRuleError;
@@ -116,14 +117,15 @@ impl ExecPolicyManager {
        let exec_policy = self.current();
        let commands =
            parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]);
        let heuristics_fallback = |cmd: &[String]| {
            if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) {
                Decision::Prompt
            } else {
                Decision::Allow
            }
        let exec_policy_fallback = |cmd: &[String]| {
            render_decision_for_unmatched_command(
                approval_policy,
                sandbox_policy,
                cmd,
                sandbox_permissions,
            )
        };
        let evaluation = exec_policy.check_multiple(commands.iter(), &heuristics_fallback);
        let evaluation = exec_policy.check_multiple(commands.iter(), &exec_policy_fallback);

        match evaluation.decision {
            Decision::Forbidden => ExecApprovalRequirement::Forbidden {
@@ -242,6 +244,70 @@ pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result<Policy,
    Ok(policy)
}

/// If a command is not matched by any execpolicy rule, derive a [`Decision`].
pub fn render_decision_for_unmatched_command(
    approval_policy: AskForApproval,
    sandbox_policy: &SandboxPolicy,
    command: &[String],
    sandbox_permissions: SandboxPermissions,
) -> Decision {
    if is_known_safe_command(command) {
        return Decision::Allow;
    }

    // On Windows, ReadOnly sandbox is not a real sandbox, so special-case it
    // here.
    let runtime_sandbox_provides_safety =
        cfg!(windows) && matches!(sandbox_policy, SandboxPolicy::ReadOnly);

    // If the command is flagged as dangerous or we have no sandbox protection,
    // we should never allow it to run without user approval.
    //
    // We prefer to prompt the user rather than outright forbid the command,
    // but if the user has explicitly disabled prompts, we must
    // forbid the command.
    if command_might_be_dangerous(command) || runtime_sandbox_provides_safety {
        return if matches!(approval_policy, AskForApproval::Never) {
            Decision::Forbidden
        } else {
            Decision::Prompt
        };
    }

    match approval_policy {
        AskForApproval::Never | AskForApproval::OnFailure => {
            // We allow the command to run, relying on the sandbox for
            // protection.
            Decision::Allow
        }
        AskForApproval::UnlessTrusted => {
            // We already checked `is_known_safe_command(command)` and it
            // returned false, so we must prompt.
            Decision::Prompt
        }
        AskForApproval::OnRequest => {
            match sandbox_policy {
                SandboxPolicy::DangerFullAccess | SandboxPolicy::ExternalSandbox { .. } => {
                    // The user has indicated we should "just run" commands
                    // in their unrestricted environment, so we do so since the
                    // command has not been flagged as dangerous.
                    Decision::Allow
                }
                SandboxPolicy::ReadOnly | SandboxPolicy::WorkspaceWrite { .. } => {
                    // In restricted sandboxes (ReadOnly/WorkspaceWrite), do not prompt for
                    // non‑escalated, non‑dangerous commands — let the sandbox enforce
                    // restrictions (e.g., block network/write) without a user prompt.
                    if sandbox_permissions.requires_escalated_permissions() {
                        Decision::Prompt
                    } else {
                        Decision::Allow
                    }
                }
            }
        }
    }
}
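
// A minimal sketch of the decision table above (illustrative, not part of the
// diff). Assuming `cmd` is neither known-safe nor flagged dangerous:
// - OnRequest + ReadOnly/WorkspaceWrite without escalation => Decision::Allow,
//   leaving the sandbox to enforce restrictions (on Windows, ReadOnly is
//   treated as "no sandbox" and degrades to Prompt).
// - UnlessTrusted => Decision::Prompt; Never/OnFailure => Decision::Allow.
//
// let cmd = vec!["some-tool".to_string(), "--flag".to_string()];
// let decision = render_decision_for_unmatched_command(
//     AskForApproval::OnRequest,
//     &SandboxPolicy::ReadOnly,
//     &cmd,
//     SandboxPermissions::UseDefault,
// );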

fn default_policy_path(codex_home: &Path) -> PathBuf {
    codex_home.join(RULES_DIR_NAME).join(DEFAULT_POLICY_FILE)
}
@@ -1051,4 +1117,108 @@ prefix_rule(
            }
        );
    }

    fn vec_str(items: &[&str]) -> Vec<String> {
        items.iter().map(std::string::ToString::to_string).collect()
    }

    /// Note this test behaves differently on Windows because it exercises an
    /// `if cfg!(windows)` code path in render_decision_for_unmatched_command().
    #[tokio::test]
    async fn verify_approval_requirement_for_unsafe_powershell_command() {
        // `brew install powershell` to run this test on a Mac!
        // Note `pwsh` is required to parse a PowerShell command to see if it
        // is safe.
        if which::which("pwsh").is_err() {
            return;
        }

        let policy = ExecPolicyManager::new(Arc::new(Policy::empty()));
        let features = Features::with_defaults();
        let permissions = SandboxPermissions::UseDefault;

        // This command should not be run without user approval unless there is
        // a proper sandbox in place to ensure safety.
        let sneaky_command = vec_str(&["pwsh", "-Command", "echo hi @(calc)"]);
        let expected_amendment = Some(ExecPolicyAmendment::new(vec_str(&[
            "pwsh",
            "-Command",
            "echo hi @(calc)",
        ])));
        let (pwsh_approval_reason, expected_req) = if cfg!(windows) {
            (
                r#"On Windows, SandboxPolicy::ReadOnly should be assumed to mean
                that no sandbox is present, so anything that is not "provably
                safe" should require approval."#,
                ExecApprovalRequirement::NeedsApproval {
                    reason: None,
                    proposed_execpolicy_amendment: expected_amendment.clone(),
                },
            )
        } else {
            (
                "On non-Windows, rely on the read-only sandbox to prevent harm.",
                ExecApprovalRequirement::Skip {
                    bypass_sandbox: false,
                    proposed_execpolicy_amendment: expected_amendment.clone(),
                },
            )
        };
        assert_eq!(
            expected_req,
            policy
                .create_exec_approval_requirement_for_command(
                    &features,
                    &sneaky_command,
                    AskForApproval::OnRequest,
                    &SandboxPolicy::ReadOnly,
                    permissions,
                )
                .await,
            "{pwsh_approval_reason}"
        );

        // This is flagged as a dangerous command on all platforms.
        let dangerous_command = vec_str(&["rm", "-rf", "/important/data"]);
        assert_eq!(
            ExecApprovalRequirement::NeedsApproval {
                reason: None,
                proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec_str(&[
                    "rm",
                    "-rf",
                    "/important/data",
                ]))),
            },
            policy
                .create_exec_approval_requirement_for_command(
                    &features,
                    &dangerous_command,
                    AskForApproval::OnRequest,
                    &SandboxPolicy::ReadOnly,
                    permissions,
                )
                .await,
            r#"On all platforms, a forbidden command should require approval
            (unless AskForApproval::Never is specified)."#
        );

        // A dangerous command should be forbidden if the user has specified
        // AskForApproval::Never.
        assert_eq!(
            ExecApprovalRequirement::Forbidden {
                reason: "`rm -rf /important/data` rejected: blocked by policy".to_string(),
            },
            policy
                .create_exec_approval_requirement_for_command(
                    &features,
                    &dangerous_command,
                    AskForApproval::Never,
                    &SandboxPolicy::ReadOnly,
                    permissions,
                )
                .await,
            r#"On all platforms, a forbidden command should require approval
            (unless AskForApproval::Never is specified)."#
        );
    }
}

@@ -8,6 +8,7 @@
use crate::config::ConfigToml;
use crate::config::profile::ConfigProfile;
use codex_otel::OtelManager;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
@@ -15,6 +16,7 @@ use std::collections::BTreeSet;

mod legacy;
pub(crate) use legacy::LegacyFeatureToggles;
pub(crate) use legacy::legacy_feature_keys;

/// High-level lifecycle stage for a feature.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -96,6 +98,8 @@ pub enum Feature {
    EnableRequestCompression,
    /// Enable collab tools.
    Collab,
    /// Steer feature flag - when enabled, Enter submits immediately instead of queuing.
    Steer,
}

impl Feature {
@@ -292,7 +296,7 @@ pub fn is_known_feature_key(key: &str) -> bool {
}

/// Deserializable features table for TOML.
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, JsonSchema)]
pub struct FeaturesToml {
    #[serde(flatten)]
    pub entries: BTreeMap<String, bool>,
@@ -420,4 +424,14 @@ pub const FEATURES: &[FeatureSpec] = &[
        stage: Stage::Experimental,
        default_enabled: false,
    },
    FeatureSpec {
        id: Feature::Steer,
        key: "steer",
        stage: Stage::Beta {
            name: "Steer conversation",
            menu_description: "Enter submits immediately; Tab queues messages when a task is running.",
            announcement: "NEW! Try Steer mode: Enter submits immediately, Tab queues. Enable in /experimental!",
        },
        default_enabled: false,
    },
];

@@ -31,6 +31,10 @@ const ALIASES: &[Alias] = &[
    },
];

pub(crate) fn legacy_feature_keys() -> impl Iterator<Item = &'static str> {
    ALIASES.iter().map(|alias| alias.legacy_key)
}

pub(crate) fn feature_for_key(key: &str) -> Option<Feature> {
    ALIASES
        .iter()

@@ -57,6 +57,7 @@ pub use model_provider_info::DEFAULT_LMSTUDIO_PORT;
pub use model_provider_info::DEFAULT_OLLAMA_PORT;
pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
@@ -126,6 +127,7 @@ pub use codex_protocol::protocol;
pub use codex_protocol::config_types as protocol_config_types;

pub use client::ModelClient;
pub use client::ModelClientSession;
pub use client_common::Prompt;
pub use client_common::REVIEW_PROMPT;
pub use client_common::ResponseEvent;

@@ -47,7 +47,7 @@ pub async fn collect_mcp_snapshot(config: &Config) -> McpListToolsResponseEvent

    mcp_connection_manager
        .initialize(
            config.mcp_servers.clone(),
            &config.mcp_servers,
            config.mcp_oauth_credentials_store_mode,
            auth_status_entries.clone(),
            tx_event,

@@ -312,7 +312,7 @@ pub(crate) struct McpConnectionManager {
impl McpConnectionManager {
    pub async fn initialize(
        &mut self,
        mcp_servers: HashMap<String, McpServerConfig>,
        mcp_servers: &HashMap<String, McpServerConfig>,
        store_mode: OAuthCredentialsStoreMode,
        auth_entries: HashMap<String, McpAuthStatusEntry>,
        tx_event: Sender<Event>,
@@ -325,6 +325,7 @@ impl McpConnectionManager {
        let mut clients = HashMap::new();
        let mut join_set = JoinSet::new();
        let elicitation_requests = ElicitationRequestManager::default();
        let mcp_servers = mcp_servers.clone();
        for (server_name, cfg) in mcp_servers.into_iter().filter(|(_, cfg)| cfg.enabled) {
            let cancel_token = cancel_token.child_token();
            let _ = emit_update(

@@ -12,6 +12,7 @@ use codex_app_server_protocol::AuthMode;
use http::HeaderMap;
use http::header::HeaderName;
use http::header::HeaderValue;
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
@@ -36,19 +37,24 @@ const OPENAI_PROVIDER_NAME: &str = "OpenAI";
/// *Responses* API. The two protocols use different request/response shapes
/// and *cannot* be auto-detected at runtime, therefore each provider entry
/// must declare which one it expects.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum WireApi {
    /// The Responses API exposed by OpenAI at `/v1/responses`.
    Responses,

    /// Experimental: Responses API over WebSocket transport.
    #[serde(rename = "responses_websocket")]
    ResponsesWebsocket,

    /// Regular Chat Completions compatible with `/v1/chat/completions`.
    #[default]
    Chat,
}

/// Serializable representation of a provider definition.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, JsonSchema)]
#[schemars(deny_unknown_fields)]
pub struct ModelProviderInfo {
    /// Friendly display name.
    pub name: String,
@@ -156,6 +162,7 @@ impl ModelProviderInfo {
            query_params: self.query_params.clone(),
            wire: match self.wire_api {
                WireApi::Responses => ApiWireApi::Responses,
                WireApi::ResponsesWebsocket => ApiWireApi::Responses,
                WireApi::Chat => ApiWireApi::Chat,
            },
            headers,
@@ -260,6 +267,7 @@ pub const DEFAULT_OLLAMA_PORT: u16 = 11434;

pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
pub const OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";

/// Built-in default provider list.
pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
@@ -273,6 +281,10 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
        ("openai", P::create_openai_provider()),
        (
            OLLAMA_OSS_PROVIDER_ID,
            create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),
        ),
        (
            OLLAMA_CHAT_PROVIDER_ID,
            create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Chat),
        ),
        (

@@ -3,6 +3,7 @@ use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use indoc::indoc;
use once_cell::sync::Lazy;

pub const HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG: &str = "hide_gpt5_1_migration_prompt";
@@ -318,6 +319,16 @@ fn gpt_52_codex_upgrade() -> ModelUpgrade {
            "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work."
                .to_string(),
        ),
        migration_markdown: Some(
            indoc! {r#"
                **Codex just got an upgrade. Introducing {model_to}.**

                Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex

                You can continue using {model_from} if you prefer.
            "#}
            .to_string(),
        ),
    }
}

@@ -174,6 +174,24 @@ mod tests {
        );
    }

    #[test]
    fn keeps_mutating_xargs_pipeline() {
        let inner = r#"rg -l QkBindingController presentation/src/main/java | xargs perl -pi -e 's/QkBindingController/QkController/g'"#;
        assert_parsed(
            &vec_str(&["bash", "-lc", inner]),
            vec![
                ParsedCommand::Search {
                    cmd: "rg -l QkBindingController presentation/src/main/java".to_string(),
                    query: Some("QkBindingController".to_string()),
                    path: Some("java".to_string()),
                },
                ParsedCommand::Unknown {
                    cmd: "xargs perl -pi -e s/QkBindingController/QkController/g".to_string(),
                },
            ],
        );
    }

    #[test]
    fn supports_cat() {
        let inner = "cat webview/README.md";
@@ -1404,8 +1422,9 @@ fn is_small_formatting_command(tokens: &[String]) -> bool {
    match cmd {
        // Always formatting; typically used in pipes.
        // `nl` is special-cased below to allow `nl <file>` to be treated as a read command.
        "wc" | "tr" | "cut" | "sort" | "uniq" | "xargs" | "tee" | "column" | "awk" | "yes"
        | "printf" => true,
        "wc" | "tr" | "cut" | "sort" | "uniq" | "tee" | "column" | "awk" | "yes" => true,
        "xargs" => !is_mutating_xargs_command(tokens),
        "printf" => true,
        "head" => {
            // Treat as formatting when no explicit file operand is present.
            // Common forms: `head -n 40`, `head -c 100`.
@@ -1465,6 +1484,54 @@ fn is_small_formatting_command(tokens: &[String]) -> bool {
    }
}

fn is_mutating_xargs_command(tokens: &[String]) -> bool {
    xargs_subcommand(tokens).is_some_and(xargs_is_mutating_subcommand)
}

fn xargs_subcommand(tokens: &[String]) -> Option<&[String]> {
    if tokens.first().map(String::as_str) != Some("xargs") {
        return None;
    }
    let mut i = 1;
    while i < tokens.len() {
        let token = &tokens[i];
        if token == "--" {
            return tokens.get(i + 1..).filter(|rest| !rest.is_empty());
        }
        if !token.starts_with('-') {
            return tokens.get(i..).filter(|rest| !rest.is_empty());
        }
        let takes_value = matches!(
            token.as_str(),
            "-E" | "-e" | "-I" | "-L" | "-n" | "-P" | "-s"
        );
        if takes_value && token.len() == 2 {
            i += 2;
        } else {
            i += 1;
        }
    }
    None
}

fn xargs_is_mutating_subcommand(tokens: &[String]) -> bool {
    let Some((head, tail)) = tokens.split_first() else {
        return false;
    };
    match head.as_str() {
        "perl" | "ruby" => xargs_has_in_place_flag(tail),
        "sed" => xargs_has_in_place_flag(tail) || tail.iter().any(|token| token == "--in-place"),
        "rg" => tail.iter().any(|token| token == "--replace"),
        _ => false,
    }
}

fn xargs_has_in_place_flag(tokens: &[String]) -> bool {
    tokens.iter().any(|token| {
        token == "-i" || token.starts_with("-i") || token == "-pi" || token.starts_with("-pi")
    })
}
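
// A minimal sketch of how the helpers above classify a pipeline stage
// (illustrative values, using `vec_str` from the tests module above):
//
// let mutating = vec_str(&["xargs", "perl", "-pi", "-e", "s/foo/bar/g"]);
// assert!(is_mutating_xargs_command(&mutating)); // in-place perl edit => mutating
//
// let formatting = vec_str(&["xargs", "wc", "-l"]);
// assert!(!is_mutating_xargs_command(&formatting)); // plain wc => still "small formatting"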

fn drop_small_formatting_commands(mut commands: Vec<Vec<String>>) -> Vec<Vec<String>> {
    commands.retain(|tokens| !is_small_formatting_command(tokens));
    commands

@@ -206,6 +206,7 @@ mod tests {
        RolloutItem::ResponseItem(items[0].clone()),
        RolloutItem::ResponseItem(items[1].clone()),
        RolloutItem::ResponseItem(items[2].clone()),
        RolloutItem::ResponseItem(items[3].clone()),
    ];

    assert_eq!(

@@ -17,7 +17,7 @@ use tokio_util::sync::CancellationToken;

pub(crate) struct SessionServices {
    pub(crate) mcp_connection_manager: Arc<RwLock<McpConnectionManager>>,
    pub(crate) mcp_startup_cancellation_token: CancellationToken,
    pub(crate) mcp_startup_cancellation_token: Mutex<CancellationToken>,
    pub(crate) unified_exec_manager: UnifiedExecProcessManager,
    pub(crate) notifier: UserNotifier,
    pub(crate) rollout: Mutex<Option<RolloutRecorder>>,

@@ -104,6 +104,10 @@ impl TurnState {
        ret
    }
}

    pub(crate) fn has_pending_input(&self) -> bool {
        !self.pending_input.is_empty()
    }
}

impl ActiveTurn {

@@ -29,8 +29,10 @@ impl SessionTask for RegularTask {
        cancellation_token: CancellationToken,
    ) -> Option<String> {
        let sess = session.clone_session();
        let run_turn_span =
            trace_span!(parent: sess.services.otel_manager.current_span(), "run_turn");
        let run_turn_span = trace_span!("run_turn");
        sess.services
            .otel_manager
            .apply_traceparent_parent(&run_turn_span);
        run_turn(sess, ctx, input, cancellation_token)
            .instrument(run_turn_span)
            .await

@@ -21,6 +21,7 @@ use crate::skills::SkillsManager;
use codex_protocol::ThreadId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::protocol::InitialHistory;
use codex_protocol::protocol::McpServerRefreshConfig;
use codex_protocol::protocol::Op;
use codex_protocol::protocol::RolloutItem;
use codex_protocol::protocol::SessionSource;
@@ -30,6 +31,7 @@ use std::sync::Arc;
#[cfg(any(test, feature = "test-support"))]
use tempfile::TempDir;
use tokio::sync::RwLock;
use tracing::warn;

/// Represents a newly created Codex thread (formerly called a conversation), including the first event
/// (which is [`EventMsg::SessionConfigured`]).
@@ -144,6 +146,27 @@ impl ThreadManager {
        self.state.threads.read().await.keys().copied().collect()
    }

    pub async fn refresh_mcp_servers(&self, refresh_config: McpServerRefreshConfig) {
        let threads = self
            .state
            .threads
            .read()
            .await
            .values()
            .cloned()
            .collect::<Vec<_>>();
        for thread in threads {
            if let Err(err) = thread
                .submit(Op::RefreshMcpServers {
                    config: refresh_config.clone(),
                })
                .await
            {
                warn!("failed to request MCP server refresh: {err}");
            }
        }
    }
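
    // A minimal usage sketch (illustrative; `thread_manager` and
    // `refresh_config` are assumed bindings, not part of the diff): broadcast
    // an MCP refresh to every live thread, logging per-thread failures rather
    // than aborting the loop.
    //
    // thread_manager.refresh_mcp_servers(refresh_config.clone()).await;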

    pub async fn get_thread(&self, thread_id: ThreadId) -> CodexResult<Arc<CodexThread>> {
        self.state.get_thread(thread_id).await
    }
@@ -226,6 +249,7 @@ impl ThreadManager {
}

impl ThreadManagerState {
    /// Fetch a thread by ID or return ThreadNotFound.
    pub(crate) async fn get_thread(&self, thread_id: ThreadId) -> CodexResult<Arc<CodexThread>> {
        let threads = self.threads.read().await;
        threads
@@ -234,6 +258,7 @@ impl ThreadManagerState {
            .ok_or_else(|| CodexErr::ThreadNotFound(thread_id))
    }

    /// Send an operation to a thread by ID.
    pub(crate) async fn send_op(&self, thread_id: ThreadId, op: Op) -> CodexResult<String> {
        let thread = self.get_thread(thread_id).await?;
        #[cfg(any(test, feature = "test-support"))]
@@ -245,7 +270,12 @@ impl ThreadManagerState {
        thread.submit(op).await
    }

    #[allow(dead_code)] // Used by upcoming multi-agent tooling.
    /// Remove a thread from the manager by ID, returning it when present.
    pub(crate) async fn remove_thread(&self, thread_id: &ThreadId) -> Option<Arc<CodexThread>> {
        self.threads.write().await.remove(thread_id)
    }

    /// Spawn a new thread with no history using a provided config.
    pub(crate) async fn spawn_new_thread(
        &self,
        config: Config,
@@ -260,6 +290,7 @@ impl ThreadManagerState {
        .await
    }

    /// Spawn a new thread with optional history and register it with the manager.
    pub(crate) async fn spawn_thread(
        &self,
        config: Config,
@@ -427,6 +458,7 @@ mod tests {
        RolloutItem::ResponseItem(items[0].clone()),
        RolloutItem::ResponseItem(items[1].clone()),
        RolloutItem::ResponseItem(items[2].clone()),
        RolloutItem::ResponseItem(items[3].clone()),
    ];

    assert_eq!(
|
||||
|
||||
@@ -74,6 +74,11 @@ mod spawn {
message: String,
}

#[derive(Debug, Serialize)]
struct SpawnAgentResult {
agent_id: String,
}

pub async fn handle(
session: Arc<Session>,
turn: Arc<TurnContext>,
@@ -82,7 +87,7 @@ mod spawn {
let args: SpawnAgentArgs = parse_arguments(&arguments)?;
if args.message.trim().is_empty() {
return Err(FunctionCallError::RespondToModel(
"Empty message can't be send to an agent".to_string(),
"Empty message can't be sent to an agent".to_string(),
));
}
let config = build_agent_spawn_config(turn.as_ref())?;
@@ -91,10 +96,17 @@ mod spawn {
.agent_control
.spawn_agent(config, args.message, true)
.await
.map_err(|err| FunctionCallError::Fatal(err.to_string()))?;
.map_err(collab_spawn_error)?;

let content = serde_json::to_string(&SpawnAgentResult {
agent_id: result.to_string(),
})
.map_err(|err| {
FunctionCallError::Fatal(format!("failed to serialize spawn_agent result: {err}"))
})?;

Ok(ToolOutput::Function {
content: format!("agent_id: {result}"),
content,
success: Some(true),
content_items: None,
})
@@ -112,6 +124,11 @@ mod send_input {
message: String,
}

#[derive(Debug, Serialize)]
struct SendInputResult {
submission_id: String,
}

pub async fn handle(
session: Arc<Session>,
arguments: String,
@@ -120,20 +137,20 @@ mod send_input {
let agent_id = agent_id(&args.id)?;
if args.message.trim().is_empty() {
return Err(FunctionCallError::RespondToModel(
"Empty message can't be send to an agent".to_string(),
"Empty message can't be sent to an agent".to_string(),
));
}
let content = session
let agent_id_for_err = agent_id;
let submission_id = session
.services
.agent_control
.send_prompt(agent_id, args.message)
.await
.map_err(|err| match err {
CodexErr::ThreadNotFound(id) => {
FunctionCallError::RespondToModel(format!("agent with id {id} not found"))
}
err => FunctionCallError::Fatal(err.to_string()),
})?;
.map_err(|err| collab_agent_error(agent_id_for_err, err))?;

let content = serde_json::to_string(&SendInputResult { submission_id }).map_err(|err| {
FunctionCallError::Fatal(format!("failed to serialize send_input result: {err}"))
})?;

Ok(ToolOutput::Function {
content,
@@ -182,17 +199,13 @@ mod wait {
ms => ms.min(MAX_WAIT_TIMEOUT_MS),
};

let agent_id_for_err = agent_id;
let mut status_rx = session
.services
.agent_control
.subscribe_status(agent_id)
.await
.map_err(|err| match err {
CodexErr::ThreadNotFound(id) => {
FunctionCallError::RespondToModel(format!("agent with id {id} not found"))
}
err => FunctionCallError::Fatal(err.to_string()),
})?;
.map_err(|err| collab_agent_error(agent_id_for_err, err))?;

// Get last known status.
let mut status = status_rx.borrow_and_update().clone();
@@ -230,9 +243,11 @@ mod wait {
FunctionCallError::Fatal(format!("failed to serialize wait result: {err}"))
})?;

let success = !result.timed_out && !matches!(result.status, AgentStatus::Errored(_));

Ok(ToolOutput::Function {
content,
success: Some(!result.timed_out),
success: Some(success),
content_items: None,
})
}
@@ -254,31 +269,23 @@ pub mod close_agent {
) -> Result<ToolOutput, FunctionCallError> {
let args: CloseAgentArgs = parse_arguments(&arguments)?;
let agent_id = agent_id(&args.id)?;
let agent_id_for_err = agent_id;
let mut status_rx = session
.services
.agent_control
.subscribe_status(agent_id)
.await
.map_err(|err| match err {
CodexErr::ThreadNotFound(id) => {
FunctionCallError::RespondToModel(format!("agent with id {id} not found"))
}
err => FunctionCallError::Fatal(err.to_string()),
})?;
.map_err(|err| collab_agent_error(agent_id_for_err, err))?;
let status = status_rx.borrow_and_update().clone();

if !matches!(status, AgentStatus::Shutdown) {
let agent_id_for_err = agent_id;
let _ = session
.services
.agent_control
.shutdown_agent(agent_id)
.await
.map_err(|err| match err {
CodexErr::ThreadNotFound(id) => {
FunctionCallError::RespondToModel(format!("agent with id {id} not found"))
}
err => FunctionCallError::Fatal(err.to_string()),
})?;
.map_err(|err| collab_agent_error(agent_id_for_err, err))?;
}

let content = serde_json::to_string(&CloseAgentResult { status }).map_err(|err| {
@@ -298,6 +305,30 @@ fn agent_id(id: &str) -> Result<ThreadId, FunctionCallError> {
.map_err(|e| FunctionCallError::RespondToModel(format!("invalid agent id {id}: {e:?}")))
}

fn collab_spawn_error(err: CodexErr) -> FunctionCallError {
match err {
CodexErr::UnsupportedOperation(_) => {
FunctionCallError::RespondToModel("collab manager unavailable".to_string())
}
err => FunctionCallError::RespondToModel(format!("collab spawn failed: {err}")),
}
}

fn collab_agent_error(agent_id: ThreadId, err: CodexErr) -> FunctionCallError {
match err {
CodexErr::ThreadNotFound(id) => {
FunctionCallError::RespondToModel(format!("agent with id {id} not found"))
}
CodexErr::InternalAgentDied => {
FunctionCallError::RespondToModel(format!("agent with id {agent_id} is closed"))
}
CodexErr::UnsupportedOperation(_) => {
FunctionCallError::RespondToModel("collab manager unavailable".to_string())
}
err => FunctionCallError::RespondToModel(format!("collab tool failed: {err}")),
}
}

fn build_agent_spawn_config(turn: &TurnContext) -> Result<Config, FunctionCallError> {
let base_config = turn.client.config();
let mut config = (*base_config).clone();
@@ -433,7 +464,7 @@ mod tests {
assert_eq!(
err,
FunctionCallError::RespondToModel(
"Empty message can't be send to an agent".to_string()
"Empty message can't be sent to an agent".to_string()
)
);
}
@@ -452,7 +483,7 @@ mod tests {
};
assert_eq!(
err,
FunctionCallError::Fatal("unsupported operation: thread manager dropped".to_string())
FunctionCallError::RespondToModel("collab manager unavailable".to_string())
);
}

@@ -471,7 +502,7 @@ mod tests {
assert_eq!(
err,
FunctionCallError::RespondToModel(
"Empty message can't be send to an agent".to_string()
"Empty message can't be sent to an agent".to_string()
)
);
}
@@ -664,6 +695,9 @@ mod tests {
.iter()
.any(|(id, op)| *id == agent_id && matches!(op, Op::Shutdown));
assert_eq!(submitted_shutdown, true);

let status_after = manager.agent_control().get_status(agent_id).await;
assert_eq!(status_after, AgentStatus::NotFound);
}

#[tokio::test]
@@ -30,10 +30,16 @@ pub trait ToolHandler: Send + Sync {
)
}

/// Returns `true` if the [ToolInvocation] *might* mutate the environment of the
/// user (through the file system, OS operations, ...).
/// This function must remain defensive and return `true` whenever there is any
/// doubt about the exact effect of a ToolInvocation.
async fn is_mutating(&self, _invocation: &ToolInvocation) -> bool {
false
}

/// Performs the actual [ToolInvocation] and returns a [ToolOutput] containing
/// the final output to return to the model.
async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError>;
}

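A minimal sketch of a handler opting into the defensive contract above; the ShellishToolHandler type and the parse_command / is_read_only / execute helpers are illustrative assumptions, not part of this diff:

// Sketch: stay defensive and report `true` unless the command is provably read-only.
struct ShellishToolHandler;

impl ToolHandler for ShellishToolHandler {
    async fn is_mutating(&self, invocation: &ToolInvocation) -> bool {
        match parse_command(invocation) {
            // A recognized command is mutating unless known to be read-only.
            Some(cmd) => !is_read_only(&cmd),
            // Anything we cannot parse is treated as potentially mutating.
            None => true,
        }
    }

    async fn handle(&self, invocation: ToolInvocation) -> Result<ToolOutput, FunctionCallError> {
        execute(invocation).await
    }
}
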
@@ -75,34 +75,6 @@ impl From<SkillInstructions> for ResponseItem {
}
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename = "developer_instructions", rename_all = "snake_case")]
pub(crate) struct DeveloperInstructions {
text: String,
}

impl DeveloperInstructions {
pub fn new<T: Into<String>>(text: T) -> Self {
Self { text: text.into() }
}

pub fn into_text(self) -> String {
self.text
}
}

impl From<DeveloperInstructions> for ResponseItem {
fn from(di: DeveloperInstructions) -> Self {
ResponseItem::Message {
id: None,
role: "developer".to_string(),
content: vec![ContentItem::InputText {
text: di.into_text(),
}],
}
}
}

#[cfg(test)]
mod tests {
use super::*;

@@ -88,7 +88,7 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
SessionSource::Exec,
);

let client = ModelClient::new(
let mut client_session = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -98,12 +98,13 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
summary,
conversation_id,
SessionSource::Exec,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input = input;

let mut stream = match client.stream(&prompt).await {
let mut stream = match client_session.stream(&prompt).await {
Ok(s) => s,
Err(e) => panic!("stream chat failed: {e}"),
};

@@ -89,7 +89,7 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
SessionSource::Exec,
);

let client = ModelClient::new(
let mut client = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -99,7 +99,8 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
summary,
conversation_id,
SessionSource::Exec,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {

@@ -15,11 +15,13 @@ codex-core = { workspace = true, features = ["test-support"] }
codex-protocol = { workspace = true }
codex-utils-absolute-path = { workspace = true }
codex-utils-cargo-bin = { workspace = true }
futures = { workspace = true }
notify = { workspace = true }
regex-lite = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true, features = ["time"] }
tokio = { workspace = true, features = ["net", "time"] }
tokio-tungstenite = { workspace = true }
walkdir = { workspace = true }
wiremock = { workspace = true }
shlex = { workspace = true }

@@ -1,3 +1,4 @@
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
@@ -5,7 +6,12 @@ use std::time::Duration;
use anyhow::Result;
use base64::Engine;
use codex_protocol::openai_models::ModelsResponse;
use futures::SinkExt;
use futures::StreamExt;
use serde_json::Value;
use tokio::net::TcpListener;
use tokio::sync::oneshot;
use tokio_tungstenite::tungstenite::Message;
use wiremock::BodyPrintLimit;
use wiremock::Match;
use wiremock::Mock;
@@ -199,6 +205,47 @@ impl ResponsesRequest {
}
}

#[derive(Debug, Clone)]
pub struct WebSocketRequest {
body: Value,
}

impl WebSocketRequest {
pub fn body_json(&self) -> Value {
self.body.clone()
}
}

pub struct WebSocketTestServer {
uri: String,
connections: Arc<Mutex<Vec<Vec<WebSocketRequest>>>>,
shutdown: oneshot::Sender<()>,
task: tokio::task::JoinHandle<()>,
}

impl WebSocketTestServer {
pub fn uri(&self) -> &str {
&self.uri
}

pub fn connections(&self) -> Vec<Vec<WebSocketRequest>> {
self.connections.lock().unwrap().clone()
}

pub fn single_connection(&self) -> Vec<WebSocketRequest> {
let connections = self.connections.lock().unwrap();
if connections.len() != 1 {
panic!("expected 1 connection, got {}", connections.len());
}
connections.first().cloned().unwrap_or_default()
}

pub async fn shutdown(self) {
let _ = self.shutdown.send(());
let _ = self.task.await;
}
}

#[derive(Debug, Clone)]
pub struct ModelsMock {
requests: Arc<Mutex<Vec<wiremock::Request>>>,
@@ -272,6 +319,15 @@ pub fn ev_completed(id: &str) -> Value {
})
}

pub fn ev_done() -> Value {
serde_json::json!({
"type": "response.done",
"response": {
"usage": {"input_tokens":0,"input_tokens_details":null,"output_tokens":0,"output_tokens_details":null,"total_tokens":0}
}
})
}

/// Convenience: SSE event for a created response with a specific id.
pub fn ev_response_created(id: &str) -> Value {
serde_json::json!({
@@ -724,6 +780,97 @@ pub async fn start_mock_server() -> MockServer {
server
}

/// Starts a lightweight WebSocket server for `/v1/responses` tests.
///
/// Each connection consumes a queue of request/event sequences. For each
/// request message, the server records the payload and streams the matching
/// events as WebSocket text frames before moving to the next request.
pub async fn start_websocket_server(connections: Vec<Vec<Vec<Value>>>) -> WebSocketTestServer {
let listener = TcpListener::bind("127.0.0.1:0")
.await
.expect("bind websocket server");
let addr = listener.local_addr().expect("websocket server address");
let uri = format!("ws://{addr}");
let connections_log = Arc::new(Mutex::new(Vec::new()));
let requests = Arc::clone(&connections_log);
let connections = Arc::new(Mutex::new(VecDeque::from(connections)));
let (shutdown_tx, mut shutdown_rx) = oneshot::channel();

let task = tokio::spawn(async move {
loop {
let accept_res = tokio::select! {
_ = &mut shutdown_rx => return,
accept_res = listener.accept() => accept_res,
};
let (stream, _) = match accept_res {
Ok(value) => value,
Err(_) => return,
};
let mut ws_stream = match tokio_tungstenite::accept_async(stream).await {
Ok(ws) => ws,
Err(_) => continue,
};

let connection_requests = {
let mut pending = connections.lock().unwrap();
pending.pop_front()
};

let Some(connection_requests) = connection_requests else {
let _ = ws_stream.close(None).await;
continue;
};

let connection_index = {
let mut log = requests.lock().unwrap();
log.push(Vec::new());
log.len() - 1
};
for request_events in connection_requests {
let Some(Ok(message)) = ws_stream.next().await else {
break;
};
if let Some(body) = parse_ws_request_body(message) {
let mut log = requests.lock().unwrap();
if let Some(connection_log) = log.get_mut(connection_index) {
connection_log.push(WebSocketRequest { body });
}
}

for event in &request_events {
let Ok(payload) = serde_json::to_string(event) else {
continue;
};
if ws_stream.send(Message::Text(payload)).await.is_err() {
break;
}
}
}

let _ = ws_stream.close(None).await;

if connections.lock().unwrap().is_empty() {
return;
}
}
});

WebSocketTestServer {
uri,
connections: connections_log,
shutdown: shutdown_tx,
task,
}
}

fn parse_ws_request_body(message: Message) -> Option<Value> {
match message {
Message::Text(text) => serde_json::from_str(&text).ok(),
Message::Binary(bytes) => serde_json::from_slice(&bytes).ok(),
_ => None,
}
}

#[derive(Clone)]
pub struct FunctionCallResponseMocks {
pub function_call: ResponseMock,

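The nesting that start_websocket_server expects is easiest to see in a sketch (event helpers as defined in this file; the response ids are arbitrary):

// Sketch: one scripted connection that answers two requests in order.
// Outer Vec  = connections the server will accept,
// middle Vec = requests expected on that connection,
// inner Vec  = events streamed back for the matching request.
let server = start_websocket_server(vec![vec![
    vec![ev_response_created("resp-1"), ev_completed("resp-1")],
    vec![ev_response_created("resp-2"), ev_completed("resp-2")],
]])
.await;
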
@@ -19,6 +19,7 @@ pub struct StreamingSseChunk {
/// Minimal streaming SSE server for tests that need gated per-chunk delivery.
pub struct StreamingSseServer {
uri: String,
requests: Arc<TokioMutex<Vec<Vec<u8>>>>,
shutdown: oneshot::Sender<()>,
task: tokio::task::JoinHandle<()>,
}
@@ -28,6 +29,10 @@ impl StreamingSseServer {
&self.uri
}

pub async fn requests(&self) -> Vec<Vec<u8>> {
self.requests.lock().await.clone()
}

pub async fn shutdown(self) {
let _ = self.shutdown.send(());
let _ = self.task.await;
@@ -61,6 +66,8 @@ pub async fn start_streaming_sse_server(
responses: VecDeque::from(responses),
completions: VecDeque::from(completion_senders),
}));
let requests = Arc::new(TokioMutex::new(Vec::new()));
let requests_for_task = Arc::clone(&requests);
let (shutdown_tx, mut shutdown_rx) = oneshot::channel();

let task = tokio::spawn(async move {
@@ -70,6 +77,7 @@ pub async fn start_streaming_sse_server(
accept_res = listener.accept() => {
let (mut stream, _) = accept_res.expect("accept streaming SSE connection");
let state = Arc::clone(&state);
let requests = Arc::clone(&requests_for_task);
tokio::spawn(async move {
let (request, body_prefix) = read_http_request(&mut stream).await;
let Some((method, path)) = parse_request_line(&request) else {
@@ -78,7 +86,7 @@ pub async fn start_streaming_sse_server(
};

if method == "GET" && path == "/v1/models" {
if drain_request_body(&mut stream, &request, body_prefix)
if read_request_body(&mut stream, &request, body_prefix)
.await
.is_err()
{
@@ -95,13 +103,16 @@ pub async fn start_streaming_sse_server(
}

if method == "POST" && path == "/v1/responses" {
if drain_request_body(&mut stream, &request, body_prefix)
let body = match read_request_body(&mut stream, &request, body_prefix)
.await
.is_err()
{
let _ = write_http_response(&mut stream, 400, "bad request", "text/plain").await;
return;
}
Ok(body) => body,
Err(_) => {
let _ = write_http_response(&mut stream, 400, "bad request", "text/plain").await;
return;
}
};
requests.lock().await.push(body);
let Some((chunks, completion)) = take_next_stream(&state).await else {
let _ = write_http_response(&mut stream, 500, "no responses queued", "text/plain").await;
return;
@@ -137,6 +148,7 @@ pub async fn start_streaming_sse_server(
(
StreamingSseServer {
uri,
requests,
shutdown: shutdown_tx,
task,
},
@@ -202,13 +214,13 @@ fn content_length(headers: &str) -> Option<usize> {
})
}

async fn drain_request_body(
async fn read_request_body(
stream: &mut tokio::net::TcpStream,
headers: &str,
mut body_prefix: Vec<u8>,
) -> std::io::Result<()> {
) -> std::io::Result<Vec<u8>> {
let Some(content_len) = content_length(headers) else {
return Ok(());
return Ok(body_prefix);
};

if body_prefix.len() > content_len {
@@ -217,12 +229,13 @@ async fn drain_request_body(

let remaining = content_len.saturating_sub(body_prefix.len());
if remaining == 0 {
return Ok(());
return Ok(body_prefix);
}

let mut rest = vec![0u8; remaining];
stream.read_exact(&mut rest).await?;
Ok(())
body_prefix.extend_from_slice(&rest);
Ok(body_prefix)
}

async fn write_sse_headers(stream: &mut tokio::net::TcpStream) -> std::io::Result<()> {

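Because read_request_body now returns the payload instead of draining it, tests can assert on the exact bytes that were posted. A sketch, assuming a server built by start_streaming_sse_server and at least one completed request (the JSON field checked here is illustrative):

// Sketch: inspect the POST bodies recorded by the streaming SSE server.
let bodies = server.requests().await;
let first: serde_json::Value =
    serde_json::from_slice(&bodies[0]).expect("valid JSON request body");
assert!(first["input"].is_array());
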
@@ -8,6 +8,7 @@ use codex_core::CodexAuth;
use codex_core::CodexThread;
use codex_core::ModelProviderInfo;
use codex_core::ThreadManager;
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::features::Feature;
@@ -23,6 +24,7 @@ use tempfile::TempDir;
use wiremock::MockServer;

use crate::load_default_config_for_test;
use crate::responses::WebSocketTestServer;
use crate::responses::start_mock_server;
use crate::streaming_sse::StreamingSseServer;
use crate::wait_for_event;
@@ -101,6 +103,21 @@ impl TestCodexBuilder {
.await
}

pub async fn build_with_websocket_server(
&mut self,
server: &WebSocketTestServer,
) -> anyhow::Result<TestCodex> {
let base_url = format!("{}/v1", server.uri());
let home = Arc::new(TempDir::new()?);
let base_url_clone = base_url.clone();
self.config_mutators.push(Box::new(move |config| {
config.model_provider.base_url = Some(base_url_clone);
config.model_provider.wire_api = WireApi::ResponsesWebsocket;
}));
self.build_with_home_and_base_url(base_url, home, None)
.await
}

pub async fn resume(
&mut self,
server: &wiremock::MockServer,

@@ -81,7 +81,7 @@ async fn responses_stream_includes_subagent_header_on_review() {
session_source.clone(),
);

let client = ModelClient::new(
let mut client_session = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -91,7 +91,8 @@ async fn responses_stream_includes_subagent_header_on_review() {
summary,
conversation_id,
session_source,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
@@ -102,7 +103,7 @@ async fn responses_stream_includes_subagent_header_on_review() {
}],
}];

let mut stream = client.stream(&prompt).await.expect("stream failed");
let mut stream = client_session.stream(&prompt).await.expect("stream failed");
while let Some(event) = stream.next().await {
if matches!(event, Ok(ResponseEvent::Completed { .. })) {
break;
@@ -176,7 +177,7 @@ async fn responses_stream_includes_subagent_header_on_other() {
session_source.clone(),
);

let client = ModelClient::new(
let mut client_session = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -186,7 +187,8 @@ async fn responses_stream_includes_subagent_header_on_other() {
summary,
conversation_id,
session_source,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
@@ -197,7 +199,7 @@ async fn responses_stream_includes_subagent_header_on_other() {
}],
}];

let mut stream = client.stream(&prompt).await.expect("stream failed");
let mut stream = client_session.stream(&prompt).await.expect("stream failed");
while let Some(event) = stream.next().await {
if matches!(event, Ok(ResponseEvent::Completed { .. })) {
break;
@@ -269,7 +271,7 @@ async fn responses_respects_model_info_overrides_from_config() {
session_source.clone(),
);

let client = ModelClient::new(
let mut client = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -279,7 +281,8 @@ async fn responses_respects_model_info_overrides_from_config() {
summary,
conversation_id,
session_source,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {

69 codex-rs/core/tests/suite/agent_websocket.rs Normal file
@@ -0,0 +1,69 @@
use anyhow::Result;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_done;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::ev_shell_command_call;
use core_test_support::responses::start_websocket_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use pretty_assertions::assert_eq;
use serde_json::Value;

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn websocket_test_codex_shell_chain() -> Result<()> {
skip_if_no_network!(Ok(()));

let call_id = "shell-command-call";
let server = start_websocket_server(vec![vec![
vec![
ev_response_created("resp-1"),
ev_shell_command_call(call_id, "echo websocket"),
ev_done(),
],
vec![
ev_response_created("resp-2"),
ev_assistant_message("msg-1", "done"),
ev_completed("resp-2"),
],
]])
.await;

let mut builder = test_codex();

let test = builder.build_with_websocket_server(&server).await?;
test.submit_turn("run the echo command").await?;

let connection = server.single_connection();
assert_eq!(connection.len(), 2);

let first = connection
.first()
.expect("missing first request")
.body_json();
let second = connection
.get(1)
.expect("missing second request")
.body_json();

assert_eq!(first["type"].as_str(), Some("response.create"));
assert_eq!(second["type"].as_str(), Some("response.append"));

let append_items = second
.get("input")
.and_then(Value::as_array)
.expect("response.append input array");
assert!(!append_items.is_empty());

let output_item = append_items
.iter()
.find(|item| item.get("type").and_then(Value::as_str) == Some("function_call_output"))
.expect("function_call_output in append");
assert_eq!(
output_item.get("call_id").and_then(Value::as_str),
Some(call_id)
);

server.shutdown().await;
Ok(())
}
@@ -284,7 +284,7 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
let expected_initial_json = json!([]);
assert_eq!(initial_json, expected_initial_json);

// 2) Submit new input; the request body must include the prior item followed by the new user input.
// 2) Submit new input; the request body must include the prior items, then initial context, then new user input.
codex
.submit(Op::UserInput {
items: vec![UserInput::Text {
@@ -298,24 +298,55 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {

let request = resp_mock.single_request();
let request_body = request.body_json();
let expected_input = json!([
{
"type": "message",
"role": "user",
"content": [{ "type": "input_text", "text": "resumed user message" }]
},
{
"type": "message",
"role": "assistant",
"content": [{ "type": "output_text", "text": "resumed assistant message" }]
},
{
"type": "message",
"role": "user",
"content": [{ "type": "input_text", "text": "hello" }]
}
]);
assert_eq!(request_body["input"], expected_input);
let input = request_body["input"].as_array().expect("input array");
let messages: Vec<(String, String)> = input
.iter()
.filter_map(|item| {
let role = item.get("role")?.as_str()?;
let text = item
.get("content")?
.as_array()?
.first()?
.get("text")?
.as_str()?;
Some((role.to_string(), text.to_string()))
})
.collect();
let pos_prior_user = messages
.iter()
.position(|(role, text)| role == "user" && text == "resumed user message")
.expect("prior user message");
let pos_prior_assistant = messages
.iter()
.position(|(role, text)| role == "assistant" && text == "resumed assistant message")
.expect("prior assistant message");
let pos_permissions = messages
.iter()
.position(|(role, text)| role == "developer" && text.contains("`approval_policy`"))
.expect("permissions message");
let pos_user_instructions = messages
.iter()
.position(|(role, text)| {
role == "user"
&& text.contains("be nice")
&& (text.starts_with("# AGENTS.md instructions for ")
|| text.starts_with("<user_instructions>"))
})
.expect("user instructions");
let pos_environment = messages
.iter()
.position(|(role, text)| role == "user" && text.contains("<environment_context>"))
.expect("environment context");
let pos_new_user = messages
.iter()
.position(|(role, text)| role == "user" && text == "hello")
.expect("new user message");

assert!(pos_prior_user < pos_prior_assistant);
assert!(pos_prior_assistant < pos_permissions);
assert!(pos_permissions < pos_user_instructions);
assert!(pos_user_instructions < pos_environment);
assert!(pos_environment < pos_new_user);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -344,7 +375,7 @@ async fn includes_conversation_id_and_model_headers_in_request() {
);
let NewThread {
thread: codex,
thread_id: conversation_id,
thread_id: session_id,
session_configured: _,
..
} = thread_manager
@@ -366,15 +397,13 @@ async fn includes_conversation_id_and_model_headers_in_request() {

let request = resp_mock.single_request();
assert_eq!(request.path(), "/v1/responses");
let request_conversation_id = request
.header("conversation_id")
.expect("conversation_id header");
let request_session_id = request.header("session_id").expect("session_id header");
let request_authorization = request
.header("authorization")
.expect("authorization header");
let request_originator = request.header("originator").expect("originator header");

assert_eq!(request_conversation_id, conversation_id.to_string());
assert_eq!(request_session_id, session_id.to_string());
assert_eq!(request_originator, "codex_cli_rs");
assert_eq!(request_authorization, "Bearer Test API Key");
}
@@ -455,7 +484,7 @@ async fn chatgpt_auth_sends_correct_request() {
);
let NewThread {
thread: codex,
thread_id: conversation_id,
thread_id,
session_configured: _,
..
} = thread_manager
@@ -477,9 +506,6 @@ async fn chatgpt_auth_sends_correct_request() {

let request = resp_mock.single_request();
assert_eq!(request.path(), "/api/codex/responses");
let request_conversation_id = request
.header("conversation_id")
.expect("conversation_id header");
let request_authorization = request
.header("authorization")
.expect("authorization header");
@@ -489,7 +515,9 @@ async fn chatgpt_auth_sends_correct_request() {
.expect("chatgpt-account-id header");
let request_body = request.body_json();

assert_eq!(request_conversation_id, conversation_id.to_string());
let session_id = request.header("session_id").expect("session_id header");
assert_eq!(session_id, thread_id.to_string());

assert_eq!(request_originator, "codex_cli_rs");
assert_eq!(request_authorization, "Bearer Access Token");
assert_eq!(request_chatgpt_account_id, "account_id");
@@ -618,17 +646,26 @@ async fn includes_user_instructions_message_in_request() {
.unwrap()
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "user");
assert_message_starts_with(&request_body["input"][0], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][0], "</INSTRUCTIONS>");
let ui_text = request_body["input"][0]["content"][0]["text"]
assert_message_role(&request_body["input"][0], "developer");
let permissions_text = request_body["input"][0]["content"][0]["text"]
.as_str()
.expect("invalid permissions message content");
assert!(
permissions_text.contains("`sandbox_mode`"),
"expected permissions message to mention sandbox_mode, got {permissions_text:?}"
);

assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][1], "</INSTRUCTIONS>");
let ui_text = request_body["input"][1]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "<environment_context>");
assert_message_ends_with(&request_body["input"][1], "</environment_context>");
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "<environment_context>");
assert_message_ends_with(&request_body["input"][2], "</environment_context>");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -682,8 +719,10 @@ async fn skills_append_to_instructions() {
let request = resp_mock.single_request();
let request_body = request.body_json();

assert_message_role(&request_body["input"][0], "user");
let instructions_text = request_body["input"][0]["content"][0]["text"]
assert_message_role(&request_body["input"][0], "developer");

assert_message_role(&request_body["input"][1], "user");
let instructions_text = request_body["input"][1]["content"][0]["text"]
.as_str()
.expect("instructions text");
assert!(
@@ -1049,6 +1088,10 @@ async fn includes_developer_instructions_message_in_request() {
let request = resp_mock.single_request();
let request_body = request.body_json();

let permissions_text = request_body["input"][0]["content"][0]["text"]
.as_str()
.expect("invalid permissions message content");

assert!(
!request_body["instructions"]
.as_str()
@@ -1056,18 +1099,24 @@ async fn includes_developer_instructions_message_in_request() {
.contains("be nice")
);
assert_message_role(&request_body["input"][0], "developer");
assert_message_equals(&request_body["input"][0], "be useful");
assert_message_role(&request_body["input"][1], "user");
assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][1], "</INSTRUCTIONS>");
let ui_text = request_body["input"][1]["content"][0]["text"]
assert!(
permissions_text.contains("`sandbox_mode`"),
"expected permissions message to mention sandbox_mode, got {permissions_text:?}"
);

assert_message_role(&request_body["input"][1], "developer");
assert_message_equals(&request_body["input"][1], "be useful");
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "# AGENTS.md instructions for ");
assert_message_ends_with(&request_body["input"][2], "</INSTRUCTIONS>");
let ui_text = request_body["input"][2]["content"][0]["text"]
.as_str()
.expect("invalid message content");
assert!(ui_text.contains("<INSTRUCTIONS>"));
assert!(ui_text.contains("be nice"));
assert_message_role(&request_body["input"][2], "user");
assert_message_starts_with(&request_body["input"][2], "<environment_context>");
assert_message_ends_with(&request_body["input"][2], "</environment_context>");
assert_message_role(&request_body["input"][3], "user");
assert_message_starts_with(&request_body["input"][3], "<environment_context>");
assert_message_ends_with(&request_body["input"][3], "</environment_context>");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -1122,7 +1171,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
SessionSource::Exec,
);

let client = ModelClient::new(
let mut client = ModelClient::new(
Arc::clone(&config),
None,
model_info,
@@ -1132,7 +1181,8 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
summary,
conversation_id,
SessionSource::Exec,
);
)
.new_session();

let mut prompt = Prompt::default();
prompt.input.push(ResponseItem::Reasoning {

213 codex-rs/core/tests/suite/client_websockets.rs Normal file
@@ -0,0 +1,213 @@
#![allow(clippy::expect_used, clippy::unwrap_used)]
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelClientSession;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::protocol::SessionSource;
use codex_otel::OtelManager;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ReasoningSummary;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::WebSocketTestServer;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::start_websocket_server;
use core_test_support::skip_if_no_network;
use futures::StreamExt;
use pretty_assertions::assert_eq;
use std::sync::Arc;
use tempfile::TempDir;

const MODEL: &str = "gpt-5.2-codex";

struct WebsocketTestHarness {
_codex_home: TempDir,
client: ModelClient,
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn responses_websocket_streams_request() {
skip_if_no_network!();

let server = start_websocket_server(vec![vec![vec![
ev_response_created("resp-1"),
ev_completed("resp-1"),
]]])
.await;

let harness = websocket_harness(&server).await;
let mut session = harness.client.new_session();
let prompt = prompt_with_input(vec![message_item("hello")]);

stream_until_complete(&mut session, &prompt).await;

let connection = server.single_connection();
assert_eq!(connection.len(), 1);
let body = connection.first().expect("missing request").body_json();

assert_eq!(body["type"].as_str(), Some("response.create"));
assert_eq!(body["model"].as_str(), Some(MODEL));
assert_eq!(body["stream"], serde_json::Value::Bool(true));
assert_eq!(body["input"].as_array().map(Vec::len), Some(1));

server.shutdown().await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn responses_websocket_appends_on_prefix() {
skip_if_no_network!();

let server = start_websocket_server(vec![vec![
vec![ev_response_created("resp-1"), ev_completed("resp-1")],
vec![ev_response_created("resp-2"), ev_completed("resp-2")],
]])
.await;

let harness = websocket_harness(&server).await;
let mut session = harness.client.new_session();
let prompt_one = prompt_with_input(vec![message_item("hello")]);
let prompt_two = prompt_with_input(vec![message_item("hello"), message_item("second")]);

stream_until_complete(&mut session, &prompt_one).await;
stream_until_complete(&mut session, &prompt_two).await;

let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let first = connection.first().expect("missing request").body_json();
let second = connection.get(1).expect("missing request").body_json();

assert_eq!(first["type"].as_str(), Some("response.create"));
assert_eq!(first["model"].as_str(), Some(MODEL));
assert_eq!(first["stream"], serde_json::Value::Bool(true));
assert_eq!(first["input"].as_array().map(Vec::len), Some(1));
let expected_append = serde_json::json!({
"type": "response.append",
"input": serde_json::to_value(&prompt_two.input[1..]).expect("serialize append items"),
});
assert_eq!(second, expected_append);

server.shutdown().await;
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn responses_websocket_creates_on_non_prefix() {
skip_if_no_network!();

let server = start_websocket_server(vec![vec![
vec![ev_response_created("resp-1"), ev_completed("resp-1")],
vec![ev_response_created("resp-2"), ev_completed("resp-2")],
]])
.await;

let harness = websocket_harness(&server).await;
let mut session = harness.client.new_session();
let prompt_one = prompt_with_input(vec![message_item("hello")]);
let prompt_two = prompt_with_input(vec![message_item("different")]);

stream_until_complete(&mut session, &prompt_one).await;
stream_until_complete(&mut session, &prompt_two).await;

let connection = server.single_connection();
assert_eq!(connection.len(), 2);
let second = connection.get(1).expect("missing request").body_json();

assert_eq!(second["type"].as_str(), Some("response.create"));
assert_eq!(second["model"].as_str(), Some(MODEL));
assert_eq!(second["stream"], serde_json::Value::Bool(true));
assert_eq!(
second["input"],
serde_json::to_value(&prompt_two.input).unwrap()
);

server.shutdown().await;
}

fn message_item(text: &str) -> ResponseItem {
ResponseItem::Message {
id: None,
role: "user".into(),
content: vec![ContentItem::InputText { text: text.into() }],
}
}

fn prompt_with_input(input: Vec<ResponseItem>) -> Prompt {
let mut prompt = Prompt::default();
prompt.input = input;
prompt
}

fn websocket_provider(server: &WebSocketTestServer) -> ModelProviderInfo {
ModelProviderInfo {
name: "mock-ws".into(),
base_url: Some(format!("{}/v1", server.uri())),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::ResponsesWebsocket,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
}
}

async fn websocket_harness(server: &WebSocketTestServer) -> WebsocketTestHarness {
let provider = websocket_provider(server);
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home).await;
config.model = Some(MODEL.to_string());
let config = Arc::new(config);
let model_info = ModelsManager::construct_model_info_offline(MODEL, &config);
let conversation_id = ThreadId::new();
let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key"));
let otel_manager = OtelManager::new(
conversation_id,
MODEL,
model_info.slug.as_str(),
None,
Some("test@test.com".to_string()),
auth_manager.get_auth_mode(),
false,
"test".to_string(),
SessionSource::Exec,
);
let client = ModelClient::new(
Arc::clone(&config),
None,
model_info,
otel_manager,
provider.clone(),
None,
ReasoningSummary::Auto,
conversation_id,
SessionSource::Exec,
);

WebsocketTestHarness {
_codex_home: codex_home,
client,
}
}

async fn stream_until_complete(session: &mut ModelClientSession, prompt: &Prompt) {
let mut stream = session
.stream(prompt)
.await
.expect("websocket stream failed");

while let Some(event) = stream.next().await {
if matches!(event, Ok(ResponseEvent::Completed { .. })) {
break;
}
}
}
@@ -604,8 +604,14 @@ async fn multiple_auto_compact_per_task_runs_after_token_limit_hit() {
.and_then(|item| item.get("text"))
.and_then(|text| text.as_str());

// Ignore the cached UI prefix (project docs + skills) since it is not relevant to
// compaction behavior and can change as bundled skills evolve.
// Ignore cached prefix messages (project docs + permissions) since they are not
// relevant to compaction behavior and can change as bundled prompts evolve.
let role = value.get("role").and_then(|role| role.as_str());
if role == Some("developer")
&& text.is_some_and(|text| text.contains("`sandbox_mode`"))
{
return false;
}
!text.is_some_and(|text| text.starts_with("# AGENTS.md instructions for "))
})
.cloned()
@@ -1726,9 +1732,11 @@ async fn manual_compact_twice_preserves_latest_user_messages() {
.into_iter()
.collect::<VecDeque<_>>();

// System prompt
// Permissions developer message
final_output.pop_front();
// Developer instructions
// User instructions (project docs/skills)
final_output.pop_front();
// Environment context
final_output.pop_front();

let _ = final_output

@@ -216,11 +216,12 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
.as_str()
.unwrap_or_default()
.to_string();
let user_instructions = requests[0]["input"][0]["content"][0]["text"]
let permissions_message = requests[0]["input"][0].clone();
let user_instructions = requests[0]["input"][1]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let environment_context = requests[0]["input"][1]["content"][0]["text"]
let environment_context = requests[0]["input"][2]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
@@ -241,6 +242,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"model": expected_model,
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -290,6 +292,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"model": expected_model,
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -359,6 +362,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"model": expected_model,
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -419,6 +423,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"model": expected_model,
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -470,6 +475,27 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
}
]
},
permissions_message,
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
@@ -499,6 +525,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
"model": expected_model,
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -550,6 +577,48 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
}
]
},
permissions_message,
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
permissions_message,
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_context
}
]
},
{
"type": "message",
"role": "user",
@@ -664,11 +733,12 @@ async fn compact_resume_after_second_compaction_preserves_history() {
.as_str()
.unwrap_or_default()
.to_string();
let user_instructions = requests[0]["input"][0]["content"][0]["text"]
let permissions_message = requests[0]["input"][0].clone();
let user_instructions = requests[0]["input"][1]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
let environment_instructions = requests[0]["input"][1]["content"][0]["text"]
let environment_instructions = requests[0]["input"][2]["content"][0]["text"]
.as_str()
.unwrap_or_default()
.to_string();
@@ -682,6 +752,7 @@ async fn compact_resume_after_second_compaction_preserves_history() {
{
"instructions": prompt,
"input": [
permissions_message,
{
"type": "message",
"role": "user",
@@ -723,6 +794,27 @@ async fn compact_resume_after_second_compaction_preserves_history() {
}
]
},
permissions_message,
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": user_instructions
}
]
},
{
"type": "message",
"role": "user",
"content": [
{
"type": "input_text",
"text": environment_instructions
}
]
},
{
"type": "message",
"role": "user",

@@ -138,8 +138,9 @@ async fn fork_thread_twice_drops_to_first_message() {

// GetHistory on fork1 flushed; the file is ready.
let fork1_items = read_items(&fork1_path);
assert!(fork1_items.len() > expected_after_first.len());
pretty_assertions::assert_eq!(
serde_json::to_value(&fork1_items).unwrap(),
serde_json::to_value(&fork1_items[..expected_after_first.len()]).unwrap(),
serde_json::to_value(&expected_after_first).unwrap()
);

@@ -162,8 +163,9 @@ async fn fork_thread_twice_drops_to_first_message() {
.unwrap_or(0);
let expected_after_second: Vec<RolloutItem> = fork1_items[..cut_last_on_fork1].to_vec();
let fork2_items = read_items(&fork2_path);
assert!(fork2_items.len() > expected_after_second.len());
pretty_assertions::assert_eq!(
serde_json::to_value(&fork2_items).unwrap(),
serde_json::to_value(&fork2_items[..expected_after_second.len()]).unwrap(),
serde_json::to_value(&expected_after_second).unwrap()
);
}

239 codex-rs/core/tests/suite/image_rollout.rs Normal file
@@ -0,0 +1,239 @@
use anyhow::Context;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::RolloutItem;
use codex_core::protocol::RolloutLine;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::UserInput;
use core_test_support::responses;
use core_test_support::responses::ev_assistant_message;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::TestCodex;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use image::ImageBuffer;
use image::Rgba;
use pretty_assertions::assert_eq;
use std::path::Path;
use std::time::Duration;

fn find_user_message_with_image(text: &str) -> Option<ResponseItem> {
for line in text.lines() {
let trimmed = line.trim();
if trimmed.is_empty() {
continue;
}
let rollout: RolloutLine = match serde_json::from_str(trimmed) {
Ok(rollout) => rollout,
Err(_) => continue,
};
if let RolloutItem::ResponseItem(ResponseItem::Message { role, content, .. }) =
&rollout.item
&& role == "user"
&& content
.iter()
.any(|span| matches!(span, ContentItem::InputImage { .. }))
&& let RolloutItem::ResponseItem(item) = rollout.item.clone()
{
return Some(item);
}
}
None
}

fn extract_image_url(item: &ResponseItem) -> Option<String> {
match item {
ResponseItem::Message { content, .. } => content.iter().find_map(|span| match span {
ContentItem::InputImage { image_url } => Some(image_url.clone()),
_ => None,
}),
_ => None,
}
}

async fn read_rollout_text(path: &Path) -> anyhow::Result<String> {
for _ in 0..50 {
if path.exists()
&& let Ok(text) = std::fs::read_to_string(path)
&& !text.trim().is_empty()
{
return Ok(text);
}
tokio::time::sleep(Duration::from_millis(20)).await;
}
std::fs::read_to_string(path)
.with_context(|| format!("read rollout file at {}", path.display()))
}

fn write_test_png(path: &Path, color: [u8; 4]) -> anyhow::Result<()> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)?;
}
let image = ImageBuffer::from_pixel(2, 2, Rgba(color));
image.save(path)?;
Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn copy_paste_local_image_persists_rollout_request_shape() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;

let TestCodex {
codex,
cwd,
session_configured,
home: _home,
..
} = test_codex().build(&server).await?;

let rel_path = "images/paste.png";
let abs_path = cwd.path().join(rel_path);
write_test_png(&abs_path, [12, 34, 56, 255])?;

let response = sse(vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "done"),
ev_completed("resp-1"),
]);
responses::mount_sse_once(&server, response).await;

let session_model = session_configured.model.clone();

codex
.submit(Op::UserTurn {
items: vec![
UserInput::LocalImage {
path: abs_path.clone(),
},
UserInput::Text {
text: "pasted image".to_string(),
},
],
final_output_json_schema: None,
cwd: cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
})
.await?;

wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
codex.submit(Op::Shutdown).await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::ShutdownComplete)).await;

let rollout_path = codex.rollout_path();
let rollout_text = read_rollout_text(&rollout_path).await?;
let actual = find_user_message_with_image(&rollout_text)
.expect("expected user message with input image in rollout");

let image_url = extract_image_url(&actual).expect("expected image url in rollout");
let expected = ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![
ContentItem::InputText {
text: codex_protocol::models::local_image_open_tag_text(1),
},
ContentItem::InputImage { image_url },
ContentItem::InputText {
text: codex_protocol::models::image_close_tag_text(),
},
ContentItem::InputText {
text: "pasted image".to_string(),
},
],
};

assert_eq!(actual, expected);

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn drag_drop_image_persists_rollout_request_shape() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;

let TestCodex {
codex,
cwd,
session_configured,
home: _home,
..
} = test_codex().build(&server).await?;

let image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGNgYAAAAAMAASsJTYQAAAAASUVORK5CYII=".to_string();

let response = sse(vec![
ev_response_created("resp-1"),
ev_assistant_message("msg-1", "done"),
ev_completed("resp-1"),
]);
responses::mount_sse_once(&server, response).await;

let session_model = session_configured.model.clone();

codex
.submit(Op::UserTurn {
items: vec![
UserInput::Image {
image_url: image_url.clone(),
},
UserInput::Text {
text: "dropped image".to_string(),
},
],
final_output_json_schema: None,
cwd: cwd.path().to_path_buf(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::DangerFullAccess,
model: session_model,
effort: None,
summary: ReasoningSummary::Auto,
})
.await?;

wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
codex.submit(Op::Shutdown).await?;
wait_for_event(&codex, |event| matches!(event, EventMsg::ShutdownComplete)).await;

let rollout_path = codex.rollout_path();
|
||||
let rollout_text = read_rollout_text(&rollout_path).await?;
|
||||
let actual = find_user_message_with_image(&rollout_text)
|
||||
.expect("expected user message with input image in rollout");
|
||||
|
||||
let image_url = extract_image_url(&actual).expect("expected image url in rollout");
|
||||
let expected = ResponseItem::Message {
|
||||
id: None,
|
||||
role: "user".to_string(),
|
||||
content: vec![
|
||||
ContentItem::InputText {
|
||||
text: codex_protocol::models::image_open_tag_text(),
|
||||
},
|
||||
ContentItem::InputImage { image_url },
|
||||
ContentItem::InputText {
|
||||
text: codex_protocol::models::image_close_tag_text(),
|
||||
},
|
||||
ContentItem::InputText {
|
||||
text: "dropped image".to_string(),
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
assert_eq!(actual, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -6,6 +6,7 @@ use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningEffortPreset;
use core_test_support::load_default_config_for_test;
use indoc::indoc;
use pretty_assertions::assert_eq;
use tempfile::tempdir;

@@ -410,6 +411,16 @@ fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
            "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work."
                .to_string(),
        ),
        migration_markdown: Some(
            indoc! {r#"
                **Codex just got an upgrade. Introducing {model_to}.**

                Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about {model_to} at https://openai.com/index/introducing-gpt-5-2-codex

                You can continue using {model_from} if you prefer.
            "#}
            .to_string(),
        ),
    }
}

@@ -15,12 +15,14 @@ pub static CODEX_ALIASES_TEMP_DIR: TempDir = unsafe {

#[cfg(not(target_os = "windows"))]
mod abort_tasks;
mod agent_websocket;
mod apply_patch_cli;
#[cfg(not(target_os = "windows"))]
mod approvals;
mod auth_refresh;
mod cli_stream;
mod client;
mod client_websockets;
mod codex_delegate;
mod compact;
mod compact_remote;
@@ -31,6 +33,7 @@ mod exec_policy;
mod fork_thread;
mod grep_files;
mod hierarchical_agents;
mod image_rollout;
mod items;
mod json_result;
mod list_dir;
@@ -41,6 +44,8 @@ mod model_overrides;
mod model_tools;
mod models_etag_responses;
mod otel;
mod pending_input;
mod permissions_messages;
mod prompt_caching;
mod quota_exceeded;
mod read_file;

codex-rs/core/tests/suite/pending_input.rs (new file, 143 lines)
@@ -0,0 +1,143 @@
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::user_input::UserInput;
use core_test_support::responses;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_message_item_added;
use core_test_support::responses::ev_output_text_delta;
use core_test_support::responses::ev_response_created;
use core_test_support::streaming_sse::StreamingSseChunk;
use core_test_support::streaming_sse::start_streaming_sse_server;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use serde_json::Value;
use tokio::sync::oneshot;

fn ev_message_item_done(id: &str, text: &str) -> Value {
    serde_json::json!({
        "type": "response.output_item.done",
        "item": {
            "type": "message",
            "role": "assistant",
            "id": id,
            "content": [{"type": "output_text", "text": text}]
        }
    })
}

fn sse_event(event: Value) -> String {
    responses::sse(vec![event])
}

fn message_input_texts(body: &Value, role: &str) -> Vec<String> {
    body.get("input")
        .and_then(Value::as_array)
        .into_iter()
        .flatten()
        .filter(|item| item.get("type").and_then(Value::as_str) == Some("message"))
        .filter(|item| item.get("role").and_then(Value::as_str) == Some(role))
        .filter_map(|item| item.get("content").and_then(Value::as_array))
        .flatten()
        .filter(|span| span.get("type").and_then(Value::as_str) == Some("input_text"))
        .filter_map(|span| span.get("text").and_then(Value::as_str).map(str::to_owned))
        .collect()
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn injected_user_input_triggers_follow_up_request_with_deltas() {
    let (gate_completed_tx, gate_completed_rx) = oneshot::channel();

    let first_chunks = vec![
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_response_created("resp-1")),
        },
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_message_item_added("msg-1", "")),
        },
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_output_text_delta("first ")),
        },
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_output_text_delta("turn")),
        },
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_message_item_done("msg-1", "first turn")),
        },
        StreamingSseChunk {
            gate: Some(gate_completed_rx),
            body: sse_event(ev_completed("resp-1")),
        },
    ];

    let second_chunks = vec![
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_response_created("resp-2")),
        },
        StreamingSseChunk {
            gate: None,
            body: sse_event(ev_completed("resp-2")),
        },
    ];

    let (server, _completions) =
        start_streaming_sse_server(vec![first_chunks, second_chunks]).await;

    let codex = test_codex()
        .with_model("gpt-5.1")
        .build_with_streaming_server(&server)
        .await
        .unwrap()
        .codex;

    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "first prompt".into(),
            }],
            final_output_json_schema: None,
        })
        .await
        .unwrap();

    wait_for_event(&codex, |event| {
        matches!(event, EventMsg::AgentMessageContentDelta(_))
    })
    .await;

    codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "second prompt".into(),
            }],
            final_output_json_schema: None,
        })
        .await
        .unwrap();

    let _ = gate_completed_tx.send(());

    wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;

    let requests = server.requests().await;
    assert_eq!(requests.len(), 2);

    let first_body: Value = serde_json::from_slice(&requests[0]).expect("parse first request");
    let second_body: Value = serde_json::from_slice(&requests[1]).expect("parse second request");

    let first_texts = message_input_texts(&first_body, "user");
    assert!(first_texts.iter().any(|text| text == "first prompt"));
    assert!(!first_texts.iter().any(|text| text == "second prompt"));

    let second_texts = message_input_texts(&second_body, "user");
    assert!(second_texts.iter().any(|text| text == "first prompt"));
    assert!(second_texts.iter().any(|text| text == "second prompt"));

    server.shutdown().await;
}

codex-rs/core/tests/suite/permissions_messages.rs (new file, 448 lines)
@@ -0,0 +1,448 @@
use anyhow::Result;
use codex_core::config::Constrained;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol::SandboxPolicy;
use codex_protocol::user_input::UserInput;
use codex_utils_absolute_path::AbsolutePathBuf;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::sse;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use std::collections::HashSet;
use tempfile::TempDir;

fn permissions_texts(input: &[serde_json::Value]) -> Vec<String> {
    input
        .iter()
        .filter_map(|item| {
            let role = item.get("role")?.as_str()?;
            if role != "developer" {
                return None;
            }
            let text = item
                .get("content")?
                .as_array()?
                .first()?
                .get("text")?
                .as_str()?;
            if text.contains("`approval_policy`") {
                Some(text.to_string())
            } else {
                None
            }
        })
        .collect()
}

fn sse_completed(id: &str) -> String {
    sse(vec![ev_response_created(id), ev_completed(id)])
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn permissions_message_sent_once_on_start() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let req = mount_sse_once(&server, sse_completed("resp-1")).await;

    let mut builder = test_codex().with_config(move |config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
    });
    let test = builder.build(&server).await?;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let request = req.single_request();
    let body = request.body_json();
    let input = body["input"].as_array().expect("input array");
    let permissions = permissions_texts(input);
    assert_eq!(permissions.len(), 1);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn permissions_message_added_on_override_change() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let req1 = mount_sse_once(&server, sse_completed("resp-1")).await;
    let req2 = mount_sse_once(&server, sse_completed("resp-2")).await;

    let mut builder = test_codex().with_config(move |config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
    });
    let test = builder.build(&server).await?;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 1".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    test.codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: Some(AskForApproval::Never),
            sandbox_policy: None,
            model: None,
            effort: None,
            summary: None,
        })
        .await?;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 2".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body1 = req1.single_request().body_json();
    let body2 = req2.single_request().body_json();
    let input1 = body1["input"].as_array().expect("input array");
    let input2 = body2["input"].as_array().expect("input array");
    let permissions_1 = permissions_texts(input1);
    let permissions_2 = permissions_texts(input2);

    assert_eq!(permissions_1.len(), 1);
    assert_eq!(permissions_2.len(), 2);
    let unique = permissions_2.into_iter().collect::<HashSet<String>>();
    assert_eq!(unique.len(), 2);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn permissions_message_not_added_when_no_change() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let req1 = mount_sse_once(&server, sse_completed("resp-1")).await;
    let req2 = mount_sse_once(&server, sse_completed("resp-2")).await;

    let mut builder = test_codex().with_config(move |config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
    });
    let test = builder.build(&server).await?;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 1".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 2".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body1 = req1.single_request().body_json();
    let body2 = req2.single_request().body_json();
    let input1 = body1["input"].as_array().expect("input array");
    let input2 = body2["input"].as_array().expect("input array");
    let permissions_1 = permissions_texts(input1);
    let permissions_2 = permissions_texts(input2);

    assert_eq!(permissions_1.len(), 1);
    assert_eq!(permissions_2.len(), 1);
    assert_eq!(permissions_1, permissions_2);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn resume_replays_permissions_messages() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let _req1 = mount_sse_once(&server, sse_completed("resp-1")).await;
    let _req2 = mount_sse_once(&server, sse_completed("resp-2")).await;
    let req3 = mount_sse_once(&server, sse_completed("resp-3")).await;

    let mut builder = test_codex().with_config(|config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
    });
    let initial = builder.build(&server).await?;
    let rollout_path = initial.session_configured.rollout_path.clone();
    let home = initial.home.clone();

    initial
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 1".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&initial.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    initial
        .codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: Some(AskForApproval::Never),
            sandbox_policy: None,
            model: None,
            effort: None,
            summary: None,
        })
        .await?;

    initial
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 2".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&initial.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let resumed = builder.resume(&server, home, rollout_path).await?;
    resumed
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "after resume".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&resumed.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body3 = req3.single_request().body_json();
    let input = body3["input"].as_array().expect("input array");
    let permissions = permissions_texts(input);
    assert_eq!(permissions.len(), 3);
    let unique = permissions.into_iter().collect::<HashSet<String>>();
    assert_eq!(unique.len(), 2);

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn resume_and_fork_append_permissions_messages() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let _req1 = mount_sse_once(&server, sse_completed("resp-1")).await;
    let req2 = mount_sse_once(&server, sse_completed("resp-2")).await;
    let req3 = mount_sse_once(&server, sse_completed("resp-3")).await;
    let req4 = mount_sse_once(&server, sse_completed("resp-4")).await;

    let mut builder = test_codex().with_config(|config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
    });
    let initial = builder.build(&server).await?;
    let rollout_path = initial.session_configured.rollout_path.clone();
    let home = initial.home.clone();

    initial
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 1".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&initial.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    initial
        .codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: Some(AskForApproval::Never),
            sandbox_policy: None,
            model: None,
            effort: None,
            summary: None,
        })
        .await?;

    initial
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello 2".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&initial.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body2 = req2.single_request().body_json();
    let input2 = body2["input"].as_array().expect("input array");
    let permissions_base = permissions_texts(input2);
    assert_eq!(permissions_base.len(), 2);

    builder = builder.with_config(|config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted);
    });
    let resumed = builder.resume(&server, home, rollout_path.clone()).await?;
    resumed
        .codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "after resume".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&resumed.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body3 = req3.single_request().body_json();
    let input3 = body3["input"].as_array().expect("input array");
    let permissions_resume = permissions_texts(input3);
    assert_eq!(permissions_resume.len(), permissions_base.len() + 1);
    assert_eq!(
        &permissions_resume[..permissions_base.len()],
        permissions_base.as_slice()
    );
    assert!(!permissions_base.contains(permissions_resume.last().expect("new permissions")));

    let mut fork_config = initial.config.clone();
    fork_config.approval_policy = Constrained::allow_any(AskForApproval::UnlessTrusted);
    let forked = initial
        .thread_manager
        .fork_thread(usize::MAX, fork_config, rollout_path)
        .await?;
    forked
        .thread
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "after fork".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&forked.thread, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body4 = req4.single_request().body_json();
    let input4 = body4["input"].as_array().expect("input array");
    let permissions_fork = permissions_texts(input4);
    assert_eq!(permissions_fork.len(), permissions_base.len() + 2);
    assert_eq!(
        &permissions_fork[..permissions_base.len()],
        permissions_base.as_slice()
    );
    let new_permissions = &permissions_fork[permissions_base.len()..];
    assert_eq!(new_permissions.len(), 2);
    assert_eq!(new_permissions[0], new_permissions[1]);
    assert!(!permissions_base.contains(&new_permissions[0]));

    Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn permissions_message_includes_writable_roots() -> Result<()> {
    skip_if_no_network!(Ok(()));

    let server = start_mock_server().await;
    let req = mount_sse_once(&server, sse_completed("resp-1")).await;
    let writable = TempDir::new()?;
    let writable_root = AbsolutePathBuf::try_from(writable.path())?;
    let sandbox_policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![writable_root],
        network_access: false,
        exclude_tmpdir_env_var: false,
        exclude_slash_tmp: false,
    };

    let mut builder = test_codex().with_config(move |config| {
        config.approval_policy = Constrained::allow_any(AskForApproval::OnRequest);
        config.sandbox_policy = Constrained::allow_any(sandbox_policy);
    });
    let test = builder.build(&server).await?;

    test.codex
        .submit(Op::UserInput {
            items: vec![UserInput::Text {
                text: "hello".into(),
            }],
            final_output_json_schema: None,
        })
        .await?;
    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let body = req.single_request().body_json();
    let input = body["input"].as_array().expect("input array");
    let permissions = permissions_texts(input);
    let sandbox_text = "Filesystem sandboxing defines which files can be read or written. `sandbox_mode` is `workspace-write`: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval. Network access is restricted.";
    let approval_text = " Approvals are your mechanism to get user consent to run shell commands without the sandbox. `approval_policy` is `on-request`: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. If the completing the task requires escalated permissions, Do not let these settings or the sandbox deter you from attempting to accomplish the user's task.\n\nHere are scenarios where you'll need to request approval:\n- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)\n- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.\n- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)\n- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.\n- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for.\n\nWhen requesting approval to execute a command that will require escalated privileges:\n - Provide the `sandbox_permissions` parameter with the value `\"require_escalated\"`\n - Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter";
    // Normalize paths by removing trailing slashes to match AbsolutePathBuf behavior
    let normalize_path =
        |p: &std::path::Path| -> String { p.to_string_lossy().trim_end_matches('/').to_string() };
    let mut roots = vec![
        normalize_path(writable.path()),
        normalize_path(test.config.cwd.as_path()),
    ];
    if cfg!(unix) && std::path::Path::new("/tmp").is_dir() {
        roots.push("/tmp".to_string());
    }
    if let Some(tmpdir) = std::env::var_os("TMPDIR") {
        let tmpdir_path = std::path::PathBuf::from(&tmpdir);
        if tmpdir_path.is_absolute() && !tmpdir.is_empty() {
            roots.push(normalize_path(&tmpdir_path));
        }
    }
    let roots_text = if roots.len() == 1 {
        format!(" The writable root is `{}`.", roots[0])
    } else {
        format!(
            " The writable roots are {}.",
            roots
                .iter()
                .map(|root| format!("`{root}`"))
                .collect::<Vec<_>>()
                .join(", ")
        )
    };
    let expected = format!(
        "<permissions instructions>{sandbox_text}{approval_text}{roots_text}</permissions instructions>"
    );
    // Normalize line endings to handle Windows vs Unix differences
    let normalize_line_endings = |s: &str| s.replace("\r\n", "\n");
    let expected_normalized = normalize_line_endings(&expected);
    let actual_normalized: Vec<String> = permissions
        .iter()
        .map(|s| normalize_line_endings(s))
        .collect();
    assert_eq!(actual_normalized, vec![expected_normalized]);

    Ok(())
}

@@ -36,9 +36,6 @@ fn default_env_context_str(cwd: &str, shell: &Shell) -> String {
    format!(
        r#"<environment_context>
<cwd>{cwd}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
<shell>{shell_name}</shell>
</environment_context>"#
    )
@@ -252,9 +249,13 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests

    let body1 = req1.single_request().body_json();
    let input1 = body1["input"].as_array().expect("input array");
    assert_eq!(input1.len(), 3, "expected cached prefix + env + user msg");
    assert_eq!(
        input1.len(),
        4,
        "expected permissions + cached prefix + env + user msg"
    );

    let ui_text = input1[0]["content"][0]["text"]
    let ui_text = input1[1]["content"][0]["text"]
        .as_str()
        .expect("ui message text");
    assert!(
@@ -266,11 +267,11 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
    let cwd_str = config.cwd.to_string_lossy();
    let expected_env_text = default_env_context_str(&cwd_str, &shell);
    assert_eq!(
        input1[1],
        input1[2],
        text_user_input(expected_env_text),
        "expected environment context after UI message"
    );
    assert_eq!(input1[2], text_user_input("hello 1".to_string()));
    assert_eq!(input1[3], text_user_input("hello 1".to_string()));

    let body2 = req2.single_request().body_json();
    let input2 = body2["input"].as_array().expect("input array");
@@ -312,16 +313,17 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

    let writable = TempDir::new().unwrap();
    let new_policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![writable.path().try_into().unwrap()],
        network_access: true,
        exclude_tmpdir_env_var: true,
        exclude_slash_tmp: true,
    };
    codex
        .submit(Op::OverrideTurnContext {
            cwd: None,
            approval_policy: Some(AskForApproval::Never),
            sandbox_policy: Some(SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![writable.path().try_into().unwrap()],
                network_access: true,
                exclude_tmpdir_env_var: true,
                exclude_slash_tmp: true,
            }),
            sandbox_policy: Some(new_policy.clone()),
            model: Some("o3".to_string()),
            effort: Some(Some(ReasoningEffort::High)),
            summary: Some(ReasoningSummary::Detailed),
@@ -354,36 +356,18 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an
        "role": "user",
        "content": [ { "type": "input_text", "text": "hello 2" } ]
    });
    // After overriding the turn context, the environment context should be emitted again
    // reflecting the new approval policy and sandbox settings. Omit cwd because it did
    // not change.
    let shell = default_user_shell();
    let expected_env_text_2 = format!(
        r#"<environment_context>
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
<writable_roots>
<root>{}</root>
</writable_roots>
<shell>{}</shell>
</environment_context>"#,
        writable.path().display(),
        shell.name()
    let expected_permissions_msg = body1["input"][0].clone();
    // After overriding the turn context, emit a new permissions message.
    let body1_input = body1["input"].as_array().expect("input array");
    let expected_permissions_msg_2 = body2["input"][body1_input.len()].clone();
    assert_ne!(
        expected_permissions_msg_2, expected_permissions_msg,
        "expected updated permissions message after override"
    );
    let expected_env_msg_2 = serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": expected_env_text_2 } ]
    });
    let expected_body2 = serde_json::json!(
        [
            body1["input"].as_array().unwrap().as_slice(),
            [expected_env_msg_2, expected_user_message_2].as_slice(),
        ]
        .concat()
    );
    assert_eq!(body2["input"], expected_body2);
    let mut expected_body2 = body1["input"].as_array().expect("input array").to_vec();
    expected_body2.push(expected_permissions_msg_2);
    expected_body2.push(expected_user_message_2);
    assert_eq!(body2["input"], serde_json::Value::Array(expected_body2));

    Ok(())
}
@@ -439,10 +423,8 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul
        .filter(|text| text.starts_with(ENVIRONMENT_CONTEXT_OPEN_TAG))
        .collect();
    assert!(
        env_texts
            .iter()
            .any(|text| text.contains("<approval_policy>never</approval_policy>")),
        "environment context should reflect overridden approval policy: {env_texts:?}"
        !env_texts.is_empty(),
        "expected environment context to be emitted: {env_texts:?}"
    );

    let env_count = input
@@ -462,9 +444,29 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul
            .is_some()
        })
        .count();
    assert_eq!(
        env_count, 2,
        "environment context should appear exactly twice, found {env_count}"
    assert!(
        env_count >= 1,
        "environment context should appear at least once, found {env_count}"
    );

    let permissions_texts: Vec<&str> = input
        .iter()
        .filter_map(|msg| {
            let role = msg["role"].as_str()?;
            if role != "developer" {
                return None;
            }
            msg["content"]
                .as_array()
                .and_then(|content| content.first())
                .and_then(|item| item["text"].as_str())
        })
        .collect();
    assert!(
        permissions_texts
            .iter()
            .any(|text| text.contains("`approval_policy` is `never`")),
        "permissions message should reflect overridden approval policy: {permissions_texts:?}"
    );

    let user_texts: Vec<&str> = input
@@ -514,6 +516,12 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
    // Second turn using per-turn overrides via UserTurn
    let new_cwd = TempDir::new().unwrap();
    let writable = TempDir::new().unwrap();
    let new_policy = SandboxPolicy::WorkspaceWrite {
        writable_roots: vec![AbsolutePathBuf::try_from(writable.path()).unwrap()],
        network_access: true,
        exclude_tmpdir_env_var: true,
        exclude_slash_tmp: true,
    };
    codex
        .submit(Op::UserTurn {
            items: vec![UserInput::Text {
@@ -521,12 +529,7 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
            }],
            cwd: new_cwd.path().to_path_buf(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::WorkspaceWrite {
                writable_roots: vec![AbsolutePathBuf::try_from(writable.path()).unwrap()],
                network_access: true,
                exclude_tmpdir_env_var: true,
                exclude_slash_tmp: true,
            },
            sandbox_policy: new_policy.clone(),
            model: "o3".to_string(),
            effort: Some(ReasoningEffort::High),
            summary: ReasoningSummary::Detailed,
@@ -556,31 +559,28 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() -> anyhow::Res
    let expected_env_text_2 = format!(
        r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
<writable_roots>
<root>{}</root>
</writable_roots>
<shell>{}</shell>
</environment_context>"#,
        new_cwd.path().display(),
        writable.path().display(),
        shell.name(),
        shell.name()
    );
    let expected_env_msg_2 = serde_json::json!({
        "type": "message",
        "role": "user",
        "content": [ { "type": "input_text", "text": expected_env_text_2 } ]
    });
    let expected_body2 = serde_json::json!(
        [
            body1["input"].as_array().unwrap().as_slice(),
            [expected_env_msg_2, expected_user_message_2].as_slice(),
        ]
        .concat()
    let expected_permissions_msg = body1["input"][0].clone();
    let body1_input = body1["input"].as_array().expect("input array");
    let expected_permissions_msg_2 = body2["input"][body1_input.len() + 1].clone();
    assert_ne!(
        expected_permissions_msg_2, expected_permissions_msg,
        "expected updated permissions message after per-turn override"
    );
    assert_eq!(body2["input"], expected_body2);
    let mut expected_body2 = body1_input.to_vec();
    expected_body2.push(expected_env_msg_2);
    expected_body2.push(expected_permissions_msg_2);
    expected_body2.push(expected_user_message_2);
    assert_eq!(body2["input"], serde_json::Value::Array(expected_body2));

    Ok(())
}
@@ -648,7 +648,8 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
    let body1 = req1.single_request().body_json();
    let body2 = req2.single_request().body_json();

    let expected_ui_msg = body1["input"][0].clone();
    let expected_permissions_msg = body1["input"][0].clone();
    let expected_ui_msg = body1["input"][1].clone();

    let shell = default_user_shell();
    let default_cwd_lossy = default_cwd.to_string_lossy();
@@ -657,6 +658,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a
    let expected_user_message_1 = text_user_input("hello 1".to_string());

    let expected_input_1 = serde_json::Value::Array(vec![
        expected_permissions_msg.clone(),
        expected_ui_msg.clone(),
        expected_env_msg_1.clone(),
        expected_user_message_1.clone(),
@@ -665,6 +667,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a

    let expected_user_message_2 = text_user_input("hello 2".to_string());
    let expected_input_2 = serde_json::Value::Array(vec![
        expected_permissions_msg,
        expected_ui_msg,
        expected_env_msg_1,
        expected_user_message_1,
@@ -738,34 +741,34 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu
    let body1 = req1.single_request().body_json();
    let body2 = req2.single_request().body_json();

    let expected_ui_msg = body1["input"][0].clone();
    let expected_permissions_msg = body1["input"][0].clone();
    let expected_ui_msg = body1["input"][1].clone();

    let shell = default_user_shell();
    let expected_env_text_1 = default_env_context_str(&default_cwd.to_string_lossy(), &shell);
    let expected_env_msg_1 = text_user_input(expected_env_text_1);
    let expected_user_message_1 = text_user_input("hello 1".to_string());
    let expected_input_1 = serde_json::Value::Array(vec![
        expected_permissions_msg.clone(),
        expected_ui_msg.clone(),
        expected_env_msg_1.clone(),
        expected_user_message_1.clone(),
    ]);
    assert_eq!(body1["input"], expected_input_1);

    let shell_name = shell.name();
    let expected_env_msg_2 = text_user_input(format!(
        r#"<environment_context>
<approval_policy>never</approval_policy>
<sandbox_mode>danger-full-access</sandbox_mode>
<network_access>enabled</network_access>
<shell>{shell_name}</shell>
</environment_context>"#
    ));
    let body1_input = body1["input"].as_array().expect("input array");
    let expected_permissions_msg_2 = body2["input"][body1_input.len()].clone();
    assert_ne!(
        expected_permissions_msg_2, expected_permissions_msg,
        "expected updated permissions message after policy change"
    );
    let expected_user_message_2 = text_user_input("hello 2".to_string());
    let expected_input_2 = serde_json::Value::Array(vec![
        expected_permissions_msg,
        expected_ui_msg,
        expected_env_msg_1,
        expected_user_message_1,
        expected_env_msg_2,
        expected_permissions_msg_2,
        expected_user_message_2,
    ]);
    assert_eq!(body2["input"], expected_input_2);

@@ -73,7 +73,8 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {

    let fixture = test_codex()
        .with_config(move |config| {
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::Stdio {
@@ -93,6 +94,10 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;
@@ -204,7 +209,8 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {

    let fixture = test_codex()
        .with_config(move |config| {
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::Stdio {
@@ -224,6 +230,10 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;
@@ -393,7 +403,8 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
    let fixture = test_codex()
        .with_config(move |config| {
            config.model_provider.wire_api = codex_core::WireApi::Chat;
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::Stdio {
@@ -413,6 +424,10 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;
@@ -533,7 +548,8 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {

    let fixture = test_codex()
        .with_config(move |config| {
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::Stdio {
@@ -550,6 +566,10 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;
@@ -676,7 +696,8 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {

    let fixture = test_codex()
        .with_config(move |config| {
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::StreamableHttp {
@@ -692,6 +713,10 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;
@@ -850,7 +875,8 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {

    let fixture = test_codex()
        .with_config(move |config| {
            config.mcp_servers.insert(
            let mut servers = config.mcp_servers.get().clone();
            servers.insert(
                server_name.to_string(),
                McpServerConfig {
                    transport: McpServerTransportConfig::StreamableHttp {
@@ -866,6 +892,10 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {
                    disabled_tools: None,
                },
            );
            config
                .mcp_servers
                .set(servers)
                .expect("test mcp servers should accept any configuration");
        })
        .build(&server)
        .await?;

@@ -67,7 +67,7 @@ async fn retries_on_early_close() {
        name: "openai".into(),
        base_url: Some(format!("{}/v1", server.uri())),
        // Environment variable that should exist in the test environment.
        // ModelClient will return an error if the environment variable for the
        // ModelClientSession will return an error if the environment variable for the
        // provider is not set.
        env_key: Some("PATH".into()),
        env_key_instructions: None,

@@ -414,7 +414,8 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()>
    let rmcp_test_server_bin = stdio_server_bin()?;

    let mut builder = test_codex().with_config(move |config| {
        config.mcp_servers.insert(
        let mut servers = config.mcp_servers.get().clone();
        servers.insert(
            server_name.to_string(),
            codex_core::config::types::McpServerConfig {
                transport: codex_core::config::types::McpServerTransportConfig::Stdio {
@@ -431,6 +432,10 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()>
                disabled_tools: None,
            },
        );
        config
            .mcp_servers
            .set(servers)
            .expect("test mcp servers should accept any configuration");
        config.tool_output_token_limit = Some(500);
    });
    let fixture = builder.build(&server).await?;
@@ -497,7 +502,8 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
    let openai_png = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMB/ee9bQAAAABJRU5ErkJggg==";

    let mut builder = test_codex().with_config(move |config| {
        config.mcp_servers.insert(
        let mut servers = config.mcp_servers.get().clone();
        servers.insert(
            server_name.to_string(),
            McpServerConfig {
                transport: McpServerTransportConfig::Stdio {
@@ -517,6 +523,10 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
                disabled_tools: None,
            },
        );
        config
            .mcp_servers
            .set(servers)
            .expect("test mcp servers should accept any configuration");
    });
    let fixture = builder.build(&server).await?;
    let session_model = fixture.session_configured.model.clone();
@@ -754,7 +764,8 @@ async fn mcp_tool_call_output_not_truncated_with_custom_limit() -> Result<()> {

    let mut builder = test_codex().with_config(move |config| {
        config.tool_output_token_limit = Some(50_000);
        config.mcp_servers.insert(
        let mut servers = config.mcp_servers.get().clone();
        servers.insert(
            server_name.to_string(),
            codex_core::config::types::McpServerConfig {
                transport: codex_core::config::types::McpServerTransportConfig::Stdio {
@@ -771,6 +782,10 @@ async fn mcp_tool_call_output_not_truncated_with_custom_limit() -> Result<()> {
                disabled_tools: None,
            },
        );
        config
            .mcp_servers
            .set(servers)
            .expect("test mcp servers should accept any configuration");
    });
    let fixture = builder.build(&server).await?;

@@ -28,7 +28,7 @@ pub struct Cli {
    #[arg(long = "oss", default_value_t = false)]
    pub oss: bool,

    /// Specify which local provider to use (lmstudio or ollama).
    /// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
    /// If not specified with --oss, will use config default or show selection.
    #[arg(long = "local-provider")]
    pub oss_provider: Option<String>,

@@ -15,9 +15,11 @@ pub use cli::Command;
pub use cli::ReviewArgs;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_common::oss::ollama_chat_deprecation_notice;
use codex_core::AuthManager;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::NewThread;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::auth::enforce_login_restrictions;
@@ -176,7 +178,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
            Some(provider)
        } else {
            return Err(anyhow::anyhow!(
                "No default OSS provider configured. Use --local-provider=provider or set oss_provider to either {LMSTUDIO_OSS_PROVIDER_ID} or {OLLAMA_OSS_PROVIDER_ID} in config.toml"
                "No default OSS provider configured. Use --local-provider=provider or set oss_provider to one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID} in config.toml"
            ));
        }
    } else {
@@ -223,6 +225,14 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
        std::process::exit(1);
    }

    let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
        Ok(notice) => notice,
        Err(err) => {
            tracing::warn!(?err, "Failed to detect Ollama wire API");
            None
        }
    };

    let otel =
        codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"), None, false);

@@ -253,6 +263,12 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
            last_message_file.clone(),
        )),
    };
    if let Some(notice) = ollama_chat_support_notice {
        event_processor.process_event(Event {
            id: String::new(),
            msg: EventMsg::DeprecationNotice(notice),
        });
    }

    if oss {
        // We're in the oss section, so provider_id should be Some

@@ -61,6 +61,7 @@ impl Policy {
        Evaluation::from_matches(matched_rules)
    }

    /// Checks multiple commands and aggregates the results.
    pub fn check_multiple<Commands, F>(
        &self,
        commands: Commands,
@@ -81,12 +82,19 @@ impl Policy {
        Evaluation::from_matches(matched_rules)
    }

    /// Returns matching rules for the given command. If no rules match and
    /// `heuristics_fallback` is provided, returns a single
    /// `HeuristicsRuleMatch` with the decision rendered by
    /// `heuristics_fallback`.
    ///
    /// If `heuristics_fallback.is_some()`, then the returned vector is
    /// guaranteed to be non-empty.
    pub fn matches_for_command(
        &self,
        cmd: &[String],
        heuristics_fallback: HeuristicsFallback<'_>,
    ) -> Vec<RuleMatch> {
        let mut matched_rules: Vec<RuleMatch> = match cmd.first() {
        let matched_rules: Vec<RuleMatch> = match cmd.first() {
            Some(first) => self
                .rules_by_program
                .get_vec(first)
@@ -95,14 +103,16 @@ impl Policy {
            None => Vec::new(),
        };

        if let (true, Some(heuristics_fallback)) = (matched_rules.is_empty(), heuristics_fallback) {
            matched_rules.push(RuleMatch::HeuristicsRuleMatch {
        if matched_rules.is_empty()
            && let Some(heuristics_fallback) = heuristics_fallback
        {
            vec![RuleMatch::HeuristicsRuleMatch {
                command: cmd.to_vec(),
                decision: heuristics_fallback(cmd),
            });
            }]
        } else {
            matched_rules
        }

        matched_rules
    }
}

@@ -121,12 +131,11 @@ impl Evaluation {
            .any(|rule_match| !matches!(rule_match, RuleMatch::HeuristicsRuleMatch { .. }))
    }

    /// Caller is responsible for ensuring that `matched_rules` is non-empty.
    fn from_matches(matched_rules: Vec<RuleMatch>) -> Self {
        let decision = matched_rules
            .iter()
            .map(RuleMatch::decision)
            .max()
            .unwrap_or(Decision::Allow);
        let decision = matched_rules.iter().map(RuleMatch::decision).max();
        #[expect(clippy::expect_used)]
        let decision = decision.expect("invariant failed: matched_rules must be non-empty");

        Self {
            decision,

@@ -15,7 +15,7 @@ reqwest = { version = "0.12", features = ["json", "stream"] }
serde_json = "1"
tokio = { version = "1", features = ["rt"] }
tracing = { version = "0.1.43", features = ["log"] }
which = "6.0"
which = "8.0"

[dev-dependencies]
wiremock = "0.6"

@@ -381,23 +381,26 @@ async fn codex_tool_passes_base_instructions() -> anyhow::Result<()> {
    let instructions = request["messages"][0]["content"].as_str().unwrap();
    assert!(instructions.starts_with("You are a helpful assistant."));

    let developer_msg = request["messages"]
    let developer_messages: Vec<&serde_json::Value> = request["messages"]
        .as_array()
        .and_then(|messages| {
            messages
                .iter()
                .find(|msg| msg.get("role").and_then(|role| role.as_str()) == Some("developer"))
        })
        .unwrap();
    let developer_content = developer_msg
        .get("content")
        .and_then(|value| value.as_str())
        .unwrap();
        .unwrap()
        .iter()
        .filter(|msg| msg.get("role").and_then(|role| role.as_str()) == Some("developer"))
        .collect();
    let developer_contents: Vec<&str> = developer_messages
        .iter()
        .filter_map(|msg| msg.get("content").and_then(|value| value.as_str()))
        .collect();
    assert!(
        !developer_content.contains('<'),
        "expected developer instructions without XML tags, got `{developer_content}`"
        developer_contents
            .iter()
            .any(|content| content.contains("`sandbox_mode`")),
        "expected permissions developer message, got {developer_contents:?}"
    );
    assert!(
        developer_contents.contains(&"Foreshadow upcoming tool calls."),
        "expected developer instructions in developer messages, got {developer_contents:?}"
    );
    assert_eq!(developer_content, "Foreshadow upcoming tool calls.");

    Ok(())
}

@@ -17,6 +17,7 @@ bytes = { workspace = true }
|
||||
codex-core = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
reqwest = { workspace = true, features = ["json", "stream"] }
|
||||
semver = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"io-std",
|
||||
@@ -30,3 +31,4 @@ wiremock = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_matches = { workspace = true }
|
||||
pretty_assertions = { workspace = true }
|
||||
|
@@ -1,6 +1,7 @@
 use bytes::BytesMut;
 use futures::StreamExt;
 use futures::stream::BoxStream;
+use semver::Version;
 use serde_json::Value as JsonValue;
 use std::collections::VecDeque;
 use std::io;
@@ -53,7 +54,7 @@ impl OllamaClient {
     }
 
     /// Build a client from a provider definition and verify the server is reachable.
-    async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
+    pub(crate) async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
         #![expect(clippy::expect_used)]
         let base_url = provider
             .base_url
@@ -125,6 +126,32 @@ impl OllamaClient {
         Ok(names)
     }
 
+    /// Query the server for its version string, returning `None` when unavailable.
+    pub async fn fetch_version(&self) -> io::Result<Option<Version>> {
+        let version_url = format!("{}/api/version", self.host_root.trim_end_matches('/'));
+        let resp = self
+            .client
+            .get(version_url)
+            .send()
+            .await
+            .map_err(io::Error::other)?;
+        if !resp.status().is_success() {
+            return Ok(None);
+        }
+        let val = resp.json::<JsonValue>().await.map_err(io::Error::other)?;
+        let Some(version_str) = val.get("version").and_then(|v| v.as_str()).map(str::trim) else {
+            return Ok(None);
+        };
+        let normalized = version_str.trim_start_matches('v');
+        match Version::parse(normalized) {
+            Ok(version) => Ok(Some(version)),
+            Err(err) => {
+                tracing::warn!("Failed to parse Ollama version `{version_str}`: {err}");
+                Ok(None)
+            }
+        }
+    }
+
     /// Start a model pull and emit streaming events. The returned stream ends when
     /// a Success event is observed or the server closes the connection.
     pub async fn pull_model_stream(
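Aside: `fetch_version` trims a possible `v` prefix before handing the string to `semver::Version::parse`, which rejects prefixed input like `v0.14.1`. A small sketch of that normalization in isolation:

// Sketch: semver::Version::parse fails on "v0.14.1", so strip the prefix first.
fn parse_reported_version(raw: &str) -> Option<semver::Version> {
    let normalized = raw.trim().trim_start_matches('v');
    semver::Version::parse(normalized).ok()
}

// parse_reported_version("v0.14.1") == Some(Version::new(0, 14, 1))
// parse_reported_version("0.14.1")  == Some(Version::new(0, 14, 1))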
@@ -236,6 +263,7 @@ impl OllamaClient {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use pretty_assertions::assert_eq;
 
     // Happy-path tests using a mock HTTP server; skip if sandbox network is disabled.
     #[tokio::test]
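Aside: importing `pretty_assertions::assert_eq` inside the test module shadows the standard macro, so failing equality assertions print a colored diff with no other call-site changes. A sketch of the pattern:

#[cfg(test)]
mod demo {
    // Shadows std's assert_eq! for every test in this module.
    use pretty_assertions::assert_eq;

    #[test]
    fn prints_a_diff_on_failure() {
        assert_eq!("expected".to_string(), "expected".to_string());
    }
}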
@@ -269,6 +297,42 @@ mod tests {
         assert!(models.contains(&"mistral".to_string()));
     }
 
+    #[tokio::test]
+    async fn test_fetch_version() {
+        if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
+            tracing::info!(
+                "{} is set; skipping test_fetch_version",
+                codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
+            );
+            return;
+        }
+
+        let server = wiremock::MockServer::start().await;
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/tags"))
+            .respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
+                serde_json::json!({ "models": [] }).to_string(),
+                "application/json",
+            ))
+            .mount(&server)
+            .await;
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/version"))
+            .respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
+                serde_json::json!({ "version": "0.14.1" }).to_string(),
+                "application/json",
+            ))
+            .mount(&server)
+            .await;
+
+        let client = OllamaClient::try_from_provider_with_base_url(server.uri().as_str())
+            .await
+            .expect("client");
+
+        let version = client.fetch_version().await.expect("version fetch");
+        assert_eq!(version, Some(Version::new(0, 14, 1)));
+    }
+
     #[tokio::test]
     async fn test_probe_server_happy_path_openai_compat_and_native() {
         if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
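Aside: per the `pull_model_stream` doc comment above, pull progress arrives as a stream that ends once success is observed or the connection closes. A hedged consumption sketch; `PullEvent` here is a stand-in, not the crate's real event type:

use futures::StreamExt;
use futures::stream::BoxStream;

// Stand-in event type; the real crate defines its own pull events.
type PullEvent = String;

async fn drain_pull(mut events: BoxStream<'_, std::io::Result<PullEvent>>) -> std::io::Result<()> {
    // The loop exits when the stream ends (success observed or server closed).
    while let Some(event) = events.next().await {
        tracing::info!("pull progress: {}", event?);
    }
    Ok(())
}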
Some files were not shown because too many files have changed in this diff.