Mirror of https://github.com/openai/codex.git (synced 2026-02-02 15:03:38 +00:00)

Compare commits: patch-squa... → rakesh/Sup... (16 commits)
Commits in this comparison (author and date columns did not survive mirroring):

- 782a3005c1
- 44bb53df1e
- 8453915e02
- 44587c2443
- 8f7b22b652
- 027944c64e
- bec51f6c05
- 66967500bb
- 167b4f0e25
- 167154178b
- 674e3d3c90
- 114ce9ff4d
- e13b35ecb0
- 377af75730
- 86e0f31a7e
- 8f837f1093
.github/workflows/rust-ci.yml (vendored): 2 lines changed

```diff
@@ -62,6 +62,8 @@ jobs:
           components: rustfmt
       - name: cargo fmt
         run: cargo fmt -- --config imports_granularity=Item --check
+      - name: Verify codegen for mcp-types
+        run: ./mcp-types/check_lib_rs.py
 
   cargo_shear:
     name: cargo shear
```
.github/workflows/rust-release.yml (vendored): 19 lines changed

```diff
@@ -219,3 +219,22 @@ jobs:
         with:
           tag: ${{ github.ref_name }}
           config: .github/dotslash-config.json
+
+  update-branch:
+    name: Update latest-alpha-cli branch
+    permissions:
+      contents: write
+    needs: release
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Update latest-alpha-cli branch
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          set -euo pipefail
+          gh api \
+            repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
+            -X PATCH \
+            -f sha="${GITHUB_SHA}" \
+            -F force=true
```
codex-rs/Cargo.lock (generated): 1 line changed

```diff
@@ -561,7 +561,6 @@ dependencies = [
  "clap",
  "codex-common",
  "codex-core",
- "codex-protocol",
  "serde",
  "serde_json",
  "tempfile",
```
```diff
@@ -34,6 +34,7 @@ rust = {}
 
 [workspace.lints.clippy]
 expect_used = "deny"
+redundant_clone = "deny"
 uninlined_format_args = "deny"
 unwrap_used = "deny"
```
```diff
@@ -733,6 +733,8 @@ fn compute_replacements(
         }
     }
 
+    replacements.sort_by(|(lhs_idx, _, _), (rhs_idx, _, _)| lhs_idx.cmp(rhs_idx));
+
     Ok(replacements)
 }
```
@@ -1216,6 +1218,33 @@ PATCH"#,
|
||||
assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pure_addition_chunk_followed_by_removal() {
|
||||
let dir = tempdir().unwrap();
|
||||
let path = dir.path().join("panic.txt");
|
||||
fs::write(&path, "line1\nline2\nline3\n").unwrap();
|
||||
let patch = wrap_patch(&format!(
|
||||
r#"*** Update File: {}
|
||||
@@
|
||||
+after-context
|
||||
+second-line
|
||||
@@
|
||||
line1
|
||||
-line2
|
||||
-line3
|
||||
+line2-replacement"#,
|
||||
path.display()
|
||||
));
|
||||
let mut stdout = Vec::new();
|
||||
let mut stderr = Vec::new();
|
||||
apply_patch(&patch, &mut stdout, &mut stderr).unwrap();
|
||||
let contents = fs::read_to_string(path).unwrap();
|
||||
assert_eq!(
|
||||
contents,
|
||||
"line1\nline2-replacement\nafter-context\nsecond-line\n"
|
||||
);
|
||||
}
|
||||
|
||||
/// Ensure that patches authored with ASCII characters can update lines that
|
||||
/// contain typographic Unicode punctuation (e.g. EN DASH, NON-BREAKING
|
||||
/// HYPHEN). Historically `git apply` succeeds in such scenarios but our
|
||||
|
||||
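For orientation, the test above drives the crate's `apply_patch` entry point with Codex's own patch envelope. A minimal, self-contained sketch of the same flow, assuming `wrap_patch` simply wraps the body in the `*** Begin Patch` / `*** End Patch` markers that appear in the heredoc tests below (the crate path `codex_apply_patch` is taken from the imports later in this diff):

```rust
use std::fs;

// Hedged sketch: a single-file update through the apply_patch API.
fn demo() -> std::io::Result<()> {
    fs::write("demo.txt", "line1\nline2\n")?;
    let patch = "*** Begin Patch\n\
                 *** Update File: demo.txt\n\
                 @@\n line1\n-line2\n+line2-replacement\n\
                 *** End Patch\n";
    let mut stdout = Vec::new();
    let mut stderr = Vec::new();
    codex_apply_patch::apply_patch(patch, &mut stdout, &mut stderr)
        .expect("patch should apply");
    assert_eq!(fs::read_to_string("demo.txt")?, "line1\nline2-replacement\n");
    Ok(())
}
```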
```diff
@@ -617,7 +617,7 @@ fn test_parse_patch_lenient() {
     assert_eq!(
         parse_patch_text(&patch_text_in_double_quoted_heredoc, ParseMode::Lenient),
         Ok(ApplyPatchArgs {
-            hunks: expected_patch.clone(),
+            hunks: expected_patch,
             patch: patch_text.to_string(),
             workdir: None,
         })
```
```diff
@@ -637,7 +637,7 @@ fn test_parse_patch_lenient() {
         "<<EOF\n*** Begin Patch\n*** Update File: file2.py\nEOF\n".to_string();
     assert_eq!(
         parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Strict),
-        Err(expected_error.clone())
+        Err(expected_error)
     );
     assert_eq!(
         parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Lenient),
```
@@ -11,7 +11,6 @@ anyhow = "1"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
codex-common = { path = "../common", features = ["cli"] }
|
||||
codex-core = { path = "../core" }
|
||||
codex-protocol = { path = "../protocol" }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
||||
```diff
@@ -1,5 +1,4 @@
 use codex_core::CodexAuth;
-use codex_protocol::mcp_protocol::AuthMode;
 use std::path::Path;
 use std::sync::LazyLock;
 use std::sync::RwLock;
```
```diff
@@ -20,7 +19,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {
 
 /// Initialize the ChatGPT token from auth.json file
 pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
-    let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
+    let auth = CodexAuth::from_codex_home(codex_home)?;
     if let Some(auth) = auth {
         let token_data = auth.get_token_data().await?;
         set_chatgpt_token_data(token_data);
```
```diff
@@ -1,7 +1,6 @@
 use codex_common::CliConfigOverrides;
 use codex_core::CodexAuth;
 use codex_core::auth::CLIENT_ID;
 use codex_core::auth::OPENAI_API_KEY_ENV_VAR;
 use codex_core::auth::login_with_api_key;
 use codex_core::auth::logout;
 use codex_core::config::Config;
```
```diff
@@ -9,7 +8,6 @@ use codex_core::config::ConfigOverrides;
 use codex_login::ServerOptions;
 use codex_login::run_login_server;
-use codex_protocol::mcp_protocol::AuthMode;
 use std::env;
 use std::path::PathBuf;
 
 pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
```
```diff
@@ -57,22 +55,25 @@ pub async fn run_login_with_api_key(
     }
 }
 
+/// Login using the OAuth device code flow.
+///
+/// Currently not implemented; exits with a clear message.
+pub async fn run_login_with_device_code(cli_config_overrides: CliConfigOverrides) -> ! {
+    // Parse and load config for consistency with other login commands.
+    let _config = load_config_or_exit(cli_config_overrides);
+
+    eprintln!("Device code login is not supported yet.");
+    std::process::exit(2);
+}
+
 pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
     let config = load_config_or_exit(cli_config_overrides);
 
-    match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
+    match CodexAuth::from_codex_home(&config.codex_home) {
         Ok(Some(auth)) => match auth.mode {
             AuthMode::ApiKey => match auth.get_token().await {
                 Ok(api_key) => {
                     eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
 
                     if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
                         && env_api_key == api_key
                     {
                         eprintln!(
                             "  API loaded from OPENAI_API_KEY environment variable or .env file"
                         );
                     }
                     std::process::exit(0);
                 }
                 Err(e) => {
```
```diff
@@ -10,6 +10,7 @@ use codex_cli::SeatbeltCommand;
 use codex_cli::login::run_login_status;
 use codex_cli::login::run_login_with_api_key;
 use codex_cli::login::run_login_with_chatgpt;
+use codex_cli::login::run_login_with_device_code;
 use codex_cli::login::run_logout;
 use codex_cli::proto;
 use codex_common::CliConfigOverrides;
```
```diff
@@ -108,6 +109,10 @@ struct LoginCommand {
     #[arg(long = "api-key", value_name = "API_KEY")]
     api_key: Option<String>,
 
+    /// Use device code flow (not yet supported)
+    #[arg(long = "use-device-code")]
+    use_device_code: bool,
+
     #[command(subcommand)]
     action: Option<LoginSubcommand>,
 }
```
@@ -168,7 +173,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
|
||||
run_login_status(login_cli.config_overrides).await;
|
||||
}
|
||||
None => {
|
||||
if let Some(api_key) = login_cli.api_key {
|
||||
if login_cli.use_device_code {
|
||||
run_login_with_device_code(login_cli.config_overrides).await;
|
||||
} else if let Some(api_key) = login_cli.api_key {
|
||||
run_login_with_api_key(login_cli.config_overrides, api_key).await;
|
||||
} else {
|
||||
run_login_with_chatgpt(login_cli.config_overrides).await;
|
||||
|
||||
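Taken together, the wiring above gives `--use-device-code` priority over `--api-key`, with the ChatGPT OAuth flow as the fallback. A condensed, self-contained sketch of that dispatch order, with stub bodies standing in for the real login routines:

```rust
use clap::Parser;

/// Stripped-down stand-in for the real LoginCommand; the actual struct
/// has more fields and a subcommand.
#[derive(Parser)]
struct LoginCommand {
    #[arg(long = "api-key", value_name = "API_KEY")]
    api_key: Option<String>,
    /// Use device code flow (not yet supported)
    #[arg(long = "use-device-code")]
    use_device_code: bool,
}

fn main() {
    let cli = LoginCommand::parse();
    if cli.use_device_code {
        // Mirrors run_login_with_device_code: a clear message, then exit 2.
        eprintln!("Device code login is not supported yet.");
        std::process::exit(2);
    } else if let Some(api_key) = cli.api_key {
        println!("would log in with API key {api_key}");
    } else {
        println!("would start the ChatGPT OAuth login flow");
    }
}
```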
@@ -37,10 +37,8 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
|
||||
|
||||
let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
|
||||
// Use conversation_manager API to start a conversation
|
||||
let conversation_manager = ConversationManager::new(AuthManager::shared(
|
||||
config.codex_home.clone(),
|
||||
config.preferred_auth_method,
|
||||
));
|
||||
let conversation_manager =
|
||||
ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
|
||||
let NewConversation {
|
||||
conversation_id: _,
|
||||
conversation,
|
||||
|
||||
@@ -49,6 +49,13 @@ pub fn builtin_model_presets() -> &'static [ModelPreset] {
|
||||
model: "gpt-5",
|
||||
effort: ReasoningEffort::High,
|
||||
},
|
||||
ModelPreset {
|
||||
id: "gpt-5-high-new",
|
||||
label: "gpt-5 high new",
|
||||
description: "— our latest release tuned to rely on the model's built-in reasoning defaults",
|
||||
model: "gpt-5-high-new",
|
||||
effort: ReasoningEffort::Medium,
|
||||
},
|
||||
];
|
||||
PRESETS
|
||||
}
|
||||
|
||||
```diff
@@ -17,6 +17,7 @@ use std::time::Duration;
 
 use codex_protocol::mcp_protocol::AuthMode;
 
+use crate::token_data::PlanType;
 use crate::token_data::TokenData;
 use crate::token_data::parse_id_token;
```
```diff
@@ -70,13 +71,9 @@ impl CodexAuth {
         Ok(access)
     }
 
-    /// Loads the available auth information from the auth.json or
-    /// OPENAI_API_KEY environment variable.
-    pub fn from_codex_home(
-        codex_home: &Path,
-        preferred_auth_method: AuthMode,
-    ) -> std::io::Result<Option<CodexAuth>> {
-        load_auth(codex_home, true, preferred_auth_method)
+    /// Loads the available auth information from the auth.json.
+    pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
+        load_auth(codex_home)
     }
 
     pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
```
```diff
@@ -135,13 +132,12 @@ impl CodexAuth {
     }
 
     pub fn get_account_id(&self) -> Option<String> {
-        self.get_current_token_data()
-            .and_then(|t| t.account_id.clone())
+        self.get_current_token_data().and_then(|t| t.account_id)
     }
 
-    pub fn get_plan_type(&self) -> Option<String> {
+    pub(crate) fn get_plan_type(&self) -> Option<PlanType> {
         self.get_current_token_data()
-            .and_then(|t| t.id_token.chatgpt_plan_type.as_ref().map(|p| p.as_string()))
+            .and_then(|t| t.id_token.chatgpt_plan_type)
     }
 
     fn get_current_auth_json(&self) -> Option<AuthDotJson> {
```
```diff
@@ -150,7 +146,7 @@ impl CodexAuth {
     }
 
     fn get_current_token_data(&self) -> Option<TokenData> {
-        self.get_current_auth_json().and_then(|t| t.tokens.clone())
+        self.get_current_auth_json().and_then(|t| t.tokens)
     }
 
     /// Consider this private to integration tests.
```
```diff
@@ -193,10 +189,11 @@ impl CodexAuth {
 
 pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
 
-fn read_openai_api_key_from_env() -> Option<String> {
+pub fn read_openai_api_key_from_env() -> Option<String> {
     env::var(OPENAI_API_KEY_ENV_VAR)
         .ok()
-        .filter(|s| !s.is_empty())
+        .map(|value| value.trim().to_string())
+        .filter(|value| !value.is_empty())
 }
 
 pub fn get_auth_file(codex_home: &Path) -> PathBuf {
```
@@ -214,7 +211,7 @@ pub fn logout(codex_home: &Path) -> std::io::Result<bool> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes an `auth.json` that contains only the API key. Intended for CLI use.
|
||||
/// Writes an `auth.json` that contains only the API key.
|
||||
pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<()> {
|
||||
let auth_dot_json = AuthDotJson {
|
||||
openai_api_key: Some(api_key.to_string()),
|
||||
@@ -224,28 +221,11 @@ pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<(
|
||||
write_auth_json(&get_auth_file(codex_home), &auth_dot_json)
|
||||
}
|
||||
|
||||
fn load_auth(
|
||||
codex_home: &Path,
|
||||
include_env_var: bool,
|
||||
preferred_auth_method: AuthMode,
|
||||
) -> std::io::Result<Option<CodexAuth>> {
|
||||
// First, check to see if there is a valid auth.json file. If not, we fall
|
||||
// back to AuthMode::ApiKey using the OPENAI_API_KEY environment variable
|
||||
// (if it is set).
|
||||
fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
|
||||
let auth_file = get_auth_file(codex_home);
|
||||
let client = crate::default_client::create_client();
|
||||
let auth_dot_json = match try_read_auth_json(&auth_file) {
|
||||
Ok(auth) => auth,
|
||||
// If auth.json does not exist, try to read the OPENAI_API_KEY from the
|
||||
// environment variable.
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound && include_env_var => {
|
||||
return match read_openai_api_key_from_env() {
|
||||
Some(api_key) => Ok(Some(CodexAuth::from_api_key_with_client(&api_key, client))),
|
||||
None => Ok(None),
|
||||
};
|
||||
}
|
||||
// Though if auth.json exists but is malformed, do not fall back to the
|
||||
// env var because the user may be expecting to use AuthMode::ChatGPT.
|
||||
Err(e) => {
|
||||
return Err(e);
|
||||
}
|
||||
```diff
@@ -257,32 +237,11 @@ fn load_auth(
         last_refresh,
     } = auth_dot_json;
 
-    // If the auth.json has an API key AND does not appear to be on a plan that
-    // should prefer AuthMode::ChatGPT, use AuthMode::ApiKey.
+    // Prefer AuthMode.ApiKey if it's set in the auth.json.
     if let Some(api_key) = &auth_json_api_key {
-        // Should any of these be AuthMode::ChatGPT with the api_key set?
-        // Does AuthMode::ChatGPT indicate that there is an auth.json that is
-        // "refreshable" even if we are using the API key for auth?
-        match &tokens {
-            Some(tokens) => {
-                if tokens.should_use_api_key(preferred_auth_method, tokens.is_openai_email()) {
-                    return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
-                } else {
-                    // Ignore the API key and fall through to ChatGPT auth.
-                }
-            }
-            None => {
-                // We have an API key but no tokens in the auth.json file.
-                // Perhaps the user ran `codex login --api-key <KEY>` or updated
-                // auth.json by hand. Either way, let's assume they are trying
-                // to use their API key.
-                return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
-            }
-        }
+        return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
     }
 
-    // For the AuthMode::ChatGPT variant, perhaps neither api_key nor
-    // openai_api_key should exist?
     Ok(Some(CodexAuth {
         api_key: None,
         mode: AuthMode::ChatGPT,
```
```diff
@@ -332,10 +291,10 @@ async fn update_tokens(
     let tokens = auth_dot_json.tokens.get_or_insert_with(TokenData::default);
     tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?;
     if let Some(access_token) = access_token {
-        tokens.access_token = access_token.to_string();
+        tokens.access_token = access_token;
     }
     if let Some(refresh_token) = refresh_token {
-        tokens.refresh_token = refresh_token.to_string();
+        tokens.refresh_token = refresh_token;
     }
     auth_dot_json.last_refresh = Some(Utc::now());
     write_auth_json(auth_file, &auth_dot_json)?;
```
```diff
@@ -412,7 +371,6 @@ use std::sync::RwLock;
 
 /// Internal cached auth state.
 #[derive(Clone, Debug)]
 struct CachedAuth {
-    preferred_auth_mode: AuthMode,
     auth: Option<CodexAuth>,
 }
```
```diff
@@ -468,9 +426,7 @@ mod tests {
             auth_dot_json,
             auth_file: _,
             ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
-            .unwrap()
-            .unwrap();
+        } = super::load_auth(codex_home.path()).unwrap().unwrap();
         assert_eq!(None, api_key);
         assert_eq!(AuthMode::ChatGPT, mode);
```
```diff
@@ -499,88 +455,6 @@ mod tests {
         )
     }
 
-    /// Even if the OPENAI_API_KEY is set in auth.json, if the plan is not in
-    /// [`TokenData::is_plan_that_should_use_api_key`], it should use
-    /// [`AuthMode::ChatGPT`].
-    #[tokio::test]
-    async fn pro_account_with_api_key_still_uses_chatgpt_auth() {
-        let codex_home = tempdir().unwrap();
-        let fake_jwt = write_auth_file(
-            AuthFileParams {
-                openai_api_key: Some("sk-test-key".to_string()),
-                chatgpt_plan_type: "pro".to_string(),
-            },
-            codex_home.path(),
-        )
-        .expect("failed to write auth file");
-
-        let CodexAuth {
-            api_key,
-            mode,
-            auth_dot_json,
-            auth_file: _,
-            ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
-            .unwrap()
-            .unwrap();
-        assert_eq!(None, api_key);
-        assert_eq!(AuthMode::ChatGPT, mode);
-
-        let guard = auth_dot_json.lock().unwrap();
-        let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist");
-        assert_eq!(
-            &AuthDotJson {
-                openai_api_key: None,
-                tokens: Some(TokenData {
-                    id_token: IdTokenInfo {
-                        email: Some("user@example.com".to_string()),
-                        chatgpt_plan_type: Some(PlanType::Known(KnownPlan::Pro)),
-                        raw_jwt: fake_jwt,
-                    },
-                    access_token: "test-access-token".to_string(),
-                    refresh_token: "test-refresh-token".to_string(),
-                    account_id: None,
-                }),
-                last_refresh: Some(
-                    DateTime::parse_from_rfc3339(LAST_REFRESH)
-                        .unwrap()
-                        .with_timezone(&Utc)
-                ),
-            },
-            auth_dot_json
-        )
-    }
-
-    /// If the OPENAI_API_KEY is set in auth.json and it is an enterprise
-    /// account, then it should use [`AuthMode::ApiKey`].
-    #[tokio::test]
-    async fn enterprise_account_with_api_key_uses_apikey_auth() {
-        let codex_home = tempdir().unwrap();
-        write_auth_file(
-            AuthFileParams {
-                openai_api_key: Some("sk-test-key".to_string()),
-                chatgpt_plan_type: "enterprise".to_string(),
-            },
-            codex_home.path(),
-        )
-        .expect("failed to write auth file");
-
-        let CodexAuth {
-            api_key,
-            mode,
-            auth_dot_json,
-            auth_file: _,
-            ..
-        } = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
-            .unwrap()
-            .unwrap();
-        assert_eq!(Some("sk-test-key".to_string()), api_key);
-        assert_eq!(AuthMode::ApiKey, mode);
-
-        let guard = auth_dot_json.lock().expect("should unwrap");
-        assert!(guard.is_none(), "auth_dot_json should be None");
-    }
-
     #[tokio::test]
     async fn loads_api_key_from_auth_json() {
         let dir = tempdir().unwrap();
```
```diff
@@ -591,9 +465,7 @@ mod tests {
         )
         .unwrap();
 
-        let auth = super::load_auth(dir.path(), false, AuthMode::ChatGPT)
-            .unwrap()
-            .unwrap();
+        let auth = super::load_auth(dir.path()).unwrap().unwrap();
         assert_eq!(auth.mode, AuthMode::ApiKey);
         assert_eq!(auth.api_key, Some("sk-test-key".to_string()));
```
```diff
@@ -683,26 +555,17 @@ impl AuthManager {
     /// preferred auth method. Errors loading auth are swallowed; `auth()` will
     /// simply return `None` in that case so callers can treat it as an
     /// unauthenticated state.
-    pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Self {
-        let auth = CodexAuth::from_codex_home(&codex_home, preferred_auth_mode)
-            .ok()
-            .flatten();
+    pub fn new(codex_home: PathBuf) -> Self {
+        let auth = CodexAuth::from_codex_home(&codex_home).ok().flatten();
         Self {
             codex_home,
-            inner: RwLock::new(CachedAuth {
-                preferred_auth_mode,
-                auth,
-            }),
+            inner: RwLock::new(CachedAuth { auth }),
         }
     }
 
     /// Create an AuthManager with a specific CodexAuth, for testing only.
     pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
-        let preferred_auth_mode = auth.mode;
-        let cached = CachedAuth {
-            preferred_auth_mode,
-            auth: Some(auth),
-        };
+        let cached = CachedAuth { auth: Some(auth) };
         Arc::new(Self {
             codex_home: PathBuf::new(),
             inner: RwLock::new(cached),
```
```diff
@@ -714,21 +577,10 @@ impl AuthManager {
         self.inner.read().ok().and_then(|c| c.auth.clone())
     }
 
-    /// Preferred auth method used when (re)loading.
-    pub fn preferred_auth_method(&self) -> AuthMode {
-        self.inner
-            .read()
-            .map(|c| c.preferred_auth_mode)
-            .unwrap_or(AuthMode::ApiKey)
-    }
-
-    /// Force a reload using the existing preferred auth method. Returns
+    /// Force a reload of the auth information from auth.json. Returns
     /// whether the auth value changed.
     pub fn reload(&self) -> bool {
-        let preferred = self.preferred_auth_method();
-        let new_auth = CodexAuth::from_codex_home(&self.codex_home, preferred)
-            .ok()
-            .flatten();
+        let new_auth = CodexAuth::from_codex_home(&self.codex_home).ok().flatten();
         if let Ok(mut guard) = self.inner.write() {
             let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
             guard.auth = new_auth;
```
```diff
@@ -747,8 +599,8 @@ impl AuthManager {
     }
 
     /// Convenience constructor returning an `Arc` wrapper.
-    pub fn shared(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Arc<Self> {
-        Arc::new(Self::new(codex_home, preferred_auth_mode))
+    pub fn shared(codex_home: PathBuf) -> Arc<Self> {
+        Arc::new(Self::new(codex_home))
     }
 
     /// Attempt to refresh the current auth token (if any). On success, reload
```
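With `preferred_auth_mode` gone, construction and reload collapse to a single argument. A hedged sketch of what a call site looks like after this change (module paths and the home directory are illustrative; real callers derive `codex_home` from `Config`, as in proto.rs above):

```rust
use std::path::PathBuf;

// Hypothetical caller of the simplified AuthManager API.
fn demo() -> Option<()> {
    let auth_manager = codex_core::AuthManager::shared(PathBuf::from("/home/user/.codex"));

    // None means unauthenticated: load errors are swallowed by design.
    let auth = auth_manager.auth()?;
    // mode is AuthMode::ApiKey when auth.json carries an API key,
    // AuthMode::ChatGPT otherwise (see load_auth above).
    println!("logged in via {:?}", auth.mode);

    // reload() re-reads auth.json and reports whether the value changed.
    let _changed = auth_manager.reload();
    Some(())
}
```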
```diff
@@ -41,6 +41,7 @@ use crate::model_provider_info::WireApi;
 use crate::openai_model_info::get_model_info;
 use crate::openai_tools::create_tools_json_for_responses_api;
 use crate::protocol::TokenUsage;
+use crate::token_data::PlanType;
 use crate::util::backoff;
 use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
 use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
```
```diff
@@ -60,7 +61,7 @@ struct Error {
     message: Option<String>,
 
     // Optional fields available on "usage_limit_reached" and "usage_not_included" errors
-    plan_type: Option<String>,
+    plan_type: Option<PlanType>,
     resets_in_seconds: Option<u64>,
 }
```
```diff
@@ -304,7 +305,7 @@ impl ModelClient {
                 // token.
                 let plan_type = error
                     .plan_type
-                    .or_else(|| auth.and_then(|a| a.get_plan_type()));
+                    .or_else(|| auth.as_ref().and_then(|a| a.get_plan_type()));
                 let resets_in_seconds = error.resets_in_seconds;
                 return Err(CodexErr::UsageLimitReached(UsageLimitReachedError {
                     plan_type,
```
```diff
@@ -1037,4 +1038,37 @@ mod tests {
         let delay = try_parse_retry_after(&err);
         assert_eq!(delay, Some(Duration::from_secs_f64(1.898)));
     }
+
+    #[test]
+    fn error_response_deserializes_old_schema_known_plan_type_and_serializes_back() {
+        use crate::token_data::KnownPlan;
+        use crate::token_data::PlanType;
+
+        let json = r#"{"error":{"type":"usage_limit_reached","plan_type":"pro","resets_in_seconds":3600}}"#;
+        let resp: ErrorResponse =
+            serde_json::from_str(json).expect("should deserialize old schema");
+
+        assert!(matches!(
+            resp.error.plan_type,
+            Some(PlanType::Known(KnownPlan::Pro))
+        ));
+
+        let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type");
+        assert_eq!(plan_json, "\"pro\"");
+    }
+
+    #[test]
+    fn error_response_deserializes_old_schema_unknown_plan_type_and_serializes_back() {
+        use crate::token_data::PlanType;
+
+        let json =
+            r#"{"error":{"type":"usage_limit_reached","plan_type":"vip","resets_in_seconds":60}}"#;
+        let resp: ErrorResponse =
+            serde_json::from_str(json).expect("should deserialize old schema");
+
+        assert!(matches!(resp.error.plan_type, Some(PlanType::Unknown(ref s)) if s == "vip"));
+
+        let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type");
+        assert_eq!(plan_json, "\"vip\"");
+    }
 }
```
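The two tests above pin down the (de)serialization contract for `token_data::PlanType`: known plan strings map onto an enum, anything else is preserved verbatim, and both round-trip to the original lowercase string. `token_data.rs` itself is not part of this diff; a minimal sketch that satisfies exactly those tests, assuming the usual serde untagged pattern (variant names taken from the error.rs changes below):

```rust
use serde::{Deserialize, Serialize};

// Hedged reconstruction of crate::token_data; the real definitions may differ.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum KnownPlan {
    Free,
    Plus,
    Pro,
    Team,
    Business,
    Enterprise,
    Edu,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PlanType {
    Known(KnownPlan),
    // Fallback keeps forward compatibility with plan names the client
    // does not know yet, e.g. "vip" in the test above.
    Unknown(String),
}

#[test]
fn round_trips_known_and_unknown_plans() {
    let pro: PlanType = serde_json::from_str("\"pro\"").unwrap();
    assert_eq!(pro, PlanType::Known(KnownPlan::Pro));
    assert_eq!(serde_json::to_string(&pro).unwrap(), "\"pro\"");

    let vip: PlanType = serde_json::from_str("\"vip\"").unwrap();
    assert_eq!(vip, PlanType::Unknown("vip".to_string()));
    assert_eq!(serde_json::to_string(&vip).unwrap(), "\"vip\"");
}
```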
```diff
@@ -19,11 +19,13 @@ use codex_apply_patch::ApplyPatchAction;
 use codex_apply_patch::MaybeApplyPatchVerified;
 use codex_apply_patch::maybe_parse_apply_patch_verified;
 use codex_protocol::mcp_protocol::ConversationId;
+use codex_protocol::protocol::CompactedItem;
 use codex_protocol::protocol::ConversationPathResponseEvent;
 use codex_protocol::protocol::RolloutItem;
 use codex_protocol::protocol::TaskStartedEvent;
 use codex_protocol::protocol::TurnAbortReason;
 use codex_protocol::protocol::TurnAbortedEvent;
+use codex_protocol::protocol::TurnContextItem;
 use futures::prelude::*;
 use mcp_types::CallToolResult;
 use serde::Deserialize;
```
```diff
@@ -212,12 +214,7 @@ impl Codex {
         let conversation_id = session.conversation_id;
 
         // This task will run until Op::Shutdown is received.
-        tokio::spawn(submission_loop(
-            session.clone(),
-            turn_context,
-            config,
-            rx_sub,
-        ));
+        tokio::spawn(submission_loop(session, turn_context, config, rx_sub));
         let codex = Codex {
             next_id: AtomicU64::new(0),
             tx_sub,
```
```diff
@@ -468,6 +465,7 @@ impl Session {
             tools_config: ToolsConfig::new(&ToolsConfigParams {
                 model_family: &config.model_family,
+                approval_policy,
                 sandbox_policy: sandbox_policy.clone(),
                 include_plan_tool: config.include_plan_tool,
                 include_apply_patch_tool: config.include_apply_patch_tool,
                 include_web_search_request: config.tools_web_search_request,
```
```diff
@@ -1071,7 +1069,7 @@ impl AgentTask {
             id: self.sub_id,
             msg: EventMsg::TurnAborted(TurnAbortedEvent { reason }),
         };
-        let sess = self.sess.clone();
+        let sess = self.sess;
         tokio::spawn(async move {
             sess.send_event(event).await;
         });
```
```diff
@@ -1147,6 +1145,7 @@ async fn submission_loop(
                 let tools_config = ToolsConfig::new(&ToolsConfigParams {
                     model_family: &effective_family,
+                    approval_policy: new_approval_policy,
                     sandbox_policy: new_sandbox_policy.clone(),
                     include_plan_tool: config.include_plan_tool,
                     include_apply_patch_tool: config.include_apply_patch_tool,
                     include_web_search_request: config.tools_web_search_request,
```
```diff
@@ -1184,18 +1183,26 @@ async fn submission_loop(
                 {
                     warn!("failed to persist overrides: {e:#}");
                 }
 
+                if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() {
+                    sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
+                        cwd,
+                        approval_policy,
+                        sandbox_policy,
+                        // Shell is not configurable from turn to turn
+                        None,
+                    ))])
+                    .await;
+                }
             }
             Op::UserInput { items } => {
-                submit_user_input(
-                    turn_context.cwd.clone(),
-                    turn_context.approval_policy,
-                    turn_context.sandbox_policy.clone(),
-                    &sess,
-                    &turn_context,
-                    sub.id.clone(),
-                    items,
-                )
-                .await;
+                // attempt to inject input into current task
+                if let Err(items) = sess.inject_input(items) {
+                    // no current task, spawn a new one
+                    let task =
+                        AgentTask::spawn(sess.clone(), Arc::clone(&turn_context), sub.id, items);
+                    sess.set_task(task);
+                }
             }
             Op::UserTurn {
                 items,
```
```diff
@@ -1240,6 +1247,7 @@ async fn submission_loop(
                 tools_config: ToolsConfig::new(&ToolsConfigParams {
                     model_family: &model_family,
+                    approval_policy,
                     sandbox_policy: sandbox_policy.clone(),
                     include_plan_tool: config.include_plan_tool,
                     include_apply_patch_tool: config.include_apply_patch_tool,
                     include_web_search_request: config.tools_web_search_request,
```
```diff
@@ -1256,16 +1264,11 @@ async fn submission_loop(
                     shell_environment_policy: turn_context.shell_environment_policy.clone(),
                     cwd,
                 };
-                submit_user_input(
-                    fresh_turn_context.cwd.clone(),
-                    fresh_turn_context.approval_policy,
-                    fresh_turn_context.sandbox_policy.clone(),
-                    &sess,
-                    &Arc::new(fresh_turn_context),
-                    sub.id.clone(),
-                    items,
-                )
-                .await;
+                // TODO: record the new environment context in the conversation history
+                // no current task, spawn a new one with the per‑turn context
+                let task =
+                    AgentTask::spawn(sess.clone(), Arc::new(fresh_turn_context), sub.id, items);
+                sess.set_task(task);
                 }
             }
             Op::ExecApproval { id, decision } => match decision {
```
```diff
@@ -1764,7 +1767,7 @@ async fn try_run_turn(
                     }
                 })
                 .map(|call_id| ResponseItem::CustomToolCallOutput {
-                    call_id: call_id.clone(),
+                    call_id,
                     output: "aborted".to_string(),
                 })
                 .collect::<Vec<_>>()
```
```diff
@@ -1780,6 +1783,15 @@ async fn try_run_turn(
             })
         };
 
+    let rollout_item = RolloutItem::TurnContext(TurnContextItem {
+        cwd: turn_context.cwd.clone(),
+        approval_policy: turn_context.approval_policy,
+        sandbox_policy: turn_context.sandbox_policy.clone(),
+        model: turn_context.client.get_model(),
+        effort: turn_context.client.get_reasoning_effort(),
+        summary: turn_context.client.get_reasoning_summary(),
+    });
+    sess.persist_rollout_items(&[rollout_item]).await;
     let mut stream = turn_context.client.clone().stream(&prompt).await?;
 
     let mut output = Vec::new();
```
```diff
@@ -1962,10 +1974,14 @@ async fn run_compact_task(
 
     sess.remove_task(&sub_id);
 
-    {
+    let rollout_item = {
         let mut state = sess.state.lock_unchecked();
         state.history.keep_last_messages(1);
-    }
+        RolloutItem::Compacted(CompactedItem {
+            message: state.history.last_agent_message(),
+        })
+    };
+    sess.persist_rollout_items(&[rollout_item]).await;
 
     let event = Event {
         id: sub_id.clone(),
```
```diff
@@ -2702,6 +2718,20 @@ async fn handle_sandbox_error(
     let sub_id = exec_command_context.sub_id.clone();
     let cwd = exec_command_context.cwd.clone();
 
+    // if the command timed out, we can simply return this failure to the model
+    if matches!(error, SandboxErr::Timeout) {
+        return ResponseInputItem::FunctionCallOutput {
+            call_id,
+            output: FunctionCallOutputPayload {
+                content: format!(
+                    "command timed out after {} milliseconds",
+                    params.timeout_duration().as_millis()
+                ),
+                success: Some(false),
+            },
+        };
+    }
+
     // Early out if either the user never wants to be asked for approval, or
     // we're letting the model manage escalation requests. Otherwise, continue
     match turn_context.approval_policy {
```
```diff
@@ -2719,20 +2749,6 @@ async fn handle_sandbox_error(
         AskForApproval::UnlessTrusted | AskForApproval::OnFailure => (),
     }
 
-    // similarly, if the command timed out, we can simply return this failure to the model
-    if matches!(error, SandboxErr::Timeout) {
-        return ResponseInputItem::FunctionCallOutput {
-            call_id,
-            output: FunctionCallOutputPayload {
-                content: format!(
-                    "command timed out after {} milliseconds",
-                    params.timeout_duration().as_millis()
-                ),
-                success: Some(false),
-            },
-        };
-    }
-
     // Note that when `error` is `SandboxErr::Denied`, it could be a false
     // positive. That is, it may have exited with a non-zero exit code, not
     // because the sandbox denied it, but because that is its expected behavior,
```
```diff
@@ -2827,30 +2843,6 @@ async fn handle_sandbox_error(
     }
 }
 
-async fn submit_user_input(
-    cwd: PathBuf,
-    approval_policy: AskForApproval,
-    sandbox_policy: SandboxPolicy,
-    sess: &Arc<Session>,
-    turn_context: &Arc<TurnContext>,
-    sub_id: String,
-    items: Vec<InputItem>,
-) {
-    sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
-        Some(cwd),
-        Some(approval_policy),
-        Some(sandbox_policy),
-        // Shell is not configurable from turn to turn
-        None,
-    ))])
-    .await;
-    if let Err(items) = sess.inject_input(items) {
-        // no current task, spawn a new one
-        let task = AgentTask::spawn(Arc::clone(sess), Arc::clone(turn_context), sub_id, items);
-        sess.set_task(task);
-    }
-}
-
 fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
     let ExecToolCallOutput {
         aggregated_output, ..
```
```diff
@@ -3015,6 +3007,15 @@ async fn drain_to_completed(
     sub_id: &str,
     prompt: &Prompt,
 ) -> CodexResult<()> {
+    let rollout_item = RolloutItem::TurnContext(TurnContextItem {
+        cwd: turn_context.cwd.clone(),
+        approval_policy: turn_context.approval_policy,
+        sandbox_policy: turn_context.sandbox_policy.clone(),
+        model: turn_context.client.get_model(),
+        effort: turn_context.client.get_reasoning_effort(),
+        summary: turn_context.client.get_reasoning_summary(),
+    });
+    sess.persist_rollout_items(&[rollout_item]).await;
     let mut stream = turn_context.client.clone().stream(prompt).await?;
     loop {
         let maybe_event = stream.next().await;
```
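The nine-line context snapshot now appears verbatim in both `try_run_turn` and `drain_to_completed`; if it grows again, it could be hoisted into a helper. A sketch of that hypothetical consolidation, using only types visible in this diff (the diff itself keeps two inline copies):

```rust
// Hypothetical helper, not part of the change; shown to make the shared
// shape of the two snippets above explicit.
async fn persist_turn_context_item(sess: &Session, turn_context: &TurnContext) {
    let rollout_item = RolloutItem::TurnContext(TurnContextItem {
        cwd: turn_context.cwd.clone(),
        approval_policy: turn_context.approval_policy,
        sandbox_policy: turn_context.sandbox_policy.clone(),
        model: turn_context.client.get_model(),
        effort: turn_context.client.get_reasoning_effort(),
        summary: turn_context.client.get_reasoning_summary(),
    });
    sess.persist_rollout_items(&[rollout_item]).await;
}
```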
```diff
@@ -3148,7 +3149,7 @@ mod tests {
             exit_code: 0,
             stdout: StreamOutput::new(String::new()),
             stderr: StreamOutput::new(String::new()),
-            aggregated_output: StreamOutput::new(full.clone()),
+            aggregated_output: StreamOutput::new(full),
             duration: StdDuration::from_secs(1),
         };
```
```diff
@@ -3182,7 +3183,7 @@ mod tests {
     fn model_truncation_respects_byte_budget() {
         // Construct a large output (about 100kB) so byte budget dominates
         let big_line = "x".repeat(100);
-        let full = std::iter::repeat_n(big_line.clone(), 1000)
+        let full = std::iter::repeat_n(big_line, 1000)
             .collect::<Vec<_>>()
             .join("\n");
```
```diff
@@ -15,11 +15,11 @@ use crate::model_provider_info::built_in_model_providers;
 use crate::openai_model_info::get_model_info;
 use crate::protocol::AskForApproval;
 use crate::protocol::SandboxPolicy;
+use anyhow::Context;
 use codex_protocol::config_types::ReasoningEffort;
 use codex_protocol::config_types::ReasoningSummary;
 use codex_protocol::config_types::SandboxMode;
 use codex_protocol::config_types::Verbosity;
-use codex_protocol::mcp_protocol::AuthMode;
 use codex_protocol::mcp_protocol::Tools;
 use codex_protocol::mcp_protocol::UserSavedConfig;
 use dirs::home_dir;
```
```diff
@@ -32,6 +32,7 @@ use toml::Value as TomlValue;
 use toml_edit::DocumentMut;
 
 const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+pub const GPT5_HIGH_MODEL: &str = "gpt-5-high";
 
 /// Maximum number of bytes of the documentation that will be embedded. Larger
 /// files are *silently truncated* to this size so we do not take up too much of
```
```diff
@@ -129,9 +130,6 @@ pub struct Config {
     /// output will be hyperlinked using the specified URI scheme.
     pub file_opener: UriBasedFileOpener,
 
-    /// Collection of settings that are specific to the TUI.
-    pub tui: Tui,
-
     /// Path to the `codex-linux-sandbox` executable. This must be set if
     /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this
     /// cannot be set in the config file: it must be set in code via
```
```diff
@@ -167,9 +165,6 @@ pub struct Config {
 
     pub tools_web_search_request: bool,
 
-    /// If set to `true`, the API key will be signed with the `originator` header.
-    pub preferred_auth_method: AuthMode,
-
     pub use_experimental_streamable_shell_tool: bool,
 
     /// If set to `true`, used only the experimental unified exec tool.
```
@@ -355,6 +350,107 @@ pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Re
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn ensure_profile_table<'a>(
|
||||
doc: &'a mut DocumentMut,
|
||||
profile_name: &str,
|
||||
) -> anyhow::Result<&'a mut toml_edit::Table> {
|
||||
let mut created_profiles_table = false;
|
||||
{
|
||||
let root = doc.as_table_mut();
|
||||
let needs_table = !root.contains_key("profiles")
|
||||
|| root
|
||||
.get("profiles")
|
||||
.and_then(|item| item.as_table())
|
||||
.is_none();
|
||||
if needs_table {
|
||||
root.insert("profiles", toml_edit::table());
|
||||
created_profiles_table = true;
|
||||
}
|
||||
}
|
||||
|
||||
let Some(profiles_table) = doc["profiles"].as_table_mut() else {
|
||||
return Err(anyhow::anyhow!(
|
||||
"profiles table missing after initialization"
|
||||
));
|
||||
};
|
||||
|
||||
if created_profiles_table {
|
||||
profiles_table.set_implicit(true);
|
||||
}
|
||||
|
||||
let needs_profile_table = !profiles_table.contains_key(profile_name)
|
||||
|| profiles_table
|
||||
.get(profile_name)
|
||||
.and_then(|item| item.as_table())
|
||||
.is_none();
|
||||
if needs_profile_table {
|
||||
profiles_table.insert(profile_name, toml_edit::table());
|
||||
}
|
||||
|
||||
let Some(profile_table) = profiles_table
|
||||
.get_mut(profile_name)
|
||||
.and_then(|item| item.as_table_mut())
|
||||
else {
|
||||
return Err(anyhow::anyhow!(format!(
|
||||
"profile table missing for {profile_name}"
|
||||
)));
|
||||
};
|
||||
|
||||
profile_table.set_implicit(false);
|
||||
Ok(profile_table)
|
||||
}
|
||||
|
||||
// TODO(jif) refactor config persistence.
|
||||
pub async fn persist_model_selection(
|
||||
codex_home: &Path,
|
||||
active_profile: Option<&str>,
|
||||
model: &str,
|
||||
effort: Option<ReasoningEffort>,
|
||||
) -> anyhow::Result<()> {
|
||||
let config_path = codex_home.join(CONFIG_TOML_FILE);
|
||||
let serialized = match tokio::fs::read_to_string(&config_path).await {
|
||||
Ok(contents) => contents,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(),
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
|
||||
let mut doc = if serialized.is_empty() {
|
||||
DocumentMut::new()
|
||||
} else {
|
||||
serialized.parse::<DocumentMut>()?
|
||||
};
|
||||
|
||||
if let Some(profile_name) = active_profile {
|
||||
let profile_table = ensure_profile_table(&mut doc, profile_name)?;
|
||||
profile_table["model"] = toml_edit::value(model);
|
||||
if let Some(effort) = effort {
|
||||
profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
|
||||
}
|
||||
} else {
|
||||
let table = doc.as_table_mut();
|
||||
table["model"] = toml_edit::value(model);
|
||||
if let Some(effort) = effort {
|
||||
table["model_reasoning_effort"] = toml_edit::value(effort.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(jif) refactor the home creation
|
||||
tokio::fs::create_dir_all(codex_home)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to create Codex home directory at {}",
|
||||
codex_home.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
tokio::fs::write(&config_path, doc.to_string())
|
||||
.await
|
||||
.with_context(|| format!("failed to persist config.toml at {}", config_path.display()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply a single dotted-path override onto a TOML value.
|
||||
fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
|
||||
use toml::value::Table;
|
||||
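A hedged usage sketch of the new persistence entry point, together with the config.toml shape it leaves behind (the profile layout matches the tests added further down; the module path is assumed):

```rust
use codex_core::config::persist_model_selection; // path assumed
use codex_protocol::config_types::ReasoningEffort;

async fn demo(codex_home: &std::path::Path) -> anyhow::Result<()> {
    // Top-level defaults: writes `model` / `model_reasoning_effort` at the root.
    persist_model_selection(codex_home, None, "gpt-5", Some(ReasoningEffort::High)).await?;

    // Scoped to a profile: creates [profiles.dev] on demand via
    // ensure_profile_table and leaves other profiles untouched.
    persist_model_selection(codex_home, Some("dev"), "gpt-5-high-new", None).await?;

    // Resulting config.toml (shape per the tests below):
    //
    //   model = "gpt-5"
    //   model_reasoning_effort = "high"
    //
    //   [profiles.dev]
    //   model = "gpt-5-high-new"
    Ok(())
}
```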
```diff
@@ -494,9 +590,6 @@ pub struct ConfigToml {
 
     pub projects: Option<HashMap<String, ProjectConfig>>,
 
-    /// If set to `true`, the API key will be signed with the `originator` header.
-    pub preferred_auth_method: Option<AuthMode>,
-
     /// Nested tools section for feature toggles
     pub tools: Option<ToolsToml>,
```
```diff
@@ -811,7 +904,6 @@ impl Config {
             codex_home,
             history,
             file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode),
-            tui: cfg.tui.unwrap_or_default(),
             codex_linux_sandbox_exe,
 
             hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
```
```diff
@@ -837,13 +929,12 @@ impl Config {
             include_plan_tool: include_plan_tool.unwrap_or(false),
             include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
             tools_web_search_request,
-            preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
             use_experimental_streamable_shell_tool: cfg
                 .experimental_use_exec_command_tool
                 .unwrap_or(false),
             use_experimental_unified_exec_tool: cfg
                 .experimental_use_unified_exec_tool
-                .unwrap_or(true),
+                .unwrap_or(false),
             include_view_image_tool,
             active_profile: active_profile_name,
             disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
```
```diff
@@ -956,6 +1047,7 @@ mod tests {
 
     use super::*;
+    use pretty_assertions::assert_eq;
 
     use tempfile::TempDir;
 
     #[test]
```
```diff
@@ -1046,6 +1138,145 @@ exclude_slash_tmp = true
         );
     }
 
+    #[tokio::test]
+    async fn persist_model_selection_updates_defaults() -> anyhow::Result<()> {
+        let codex_home = TempDir::new()?;
+
+        persist_model_selection(
+            codex_home.path(),
+            None,
+            "gpt-5-high-new",
+            Some(ReasoningEffort::High),
+        )
+        .await?;
+
+        let serialized =
+            tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
+        let parsed: ConfigToml = toml::from_str(&serialized)?;
+
+        assert_eq!(parsed.model.as_deref(), Some("gpt-5-high-new"));
+        assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn persist_model_selection_overwrites_existing_model() -> anyhow::Result<()> {
+        let codex_home = TempDir::new()?;
+        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
+
+        tokio::fs::write(
+            &config_path,
+            r#"
+model = "gpt-5"
+model_reasoning_effort = "medium"
+
+[profiles.dev]
+model = "gpt-4.1"
+"#,
+        )
+        .await?;
+
+        persist_model_selection(
+            codex_home.path(),
+            None,
+            "o4-mini",
+            Some(ReasoningEffort::High),
+        )
+        .await?;
+
+        let serialized = tokio::fs::read_to_string(config_path).await?;
+        let parsed: ConfigToml = toml::from_str(&serialized)?;
+
+        assert_eq!(parsed.model.as_deref(), Some("o4-mini"));
+        assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High));
+        assert_eq!(
+            parsed
+                .profiles
+                .get("dev")
+                .and_then(|profile| profile.model.as_deref()),
+            Some("gpt-4.1"),
+        );
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn persist_model_selection_updates_profile() -> anyhow::Result<()> {
+        let codex_home = TempDir::new()?;
+
+        persist_model_selection(
+            codex_home.path(),
+            Some("dev"),
+            "gpt-5-high-new",
+            Some(ReasoningEffort::Low),
+        )
+        .await?;
+
+        let serialized =
+            tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?;
+        let parsed: ConfigToml = toml::from_str(&serialized)?;
+        let profile = parsed
+            .profiles
+            .get("dev")
+            .expect("profile should be created");
+
+        assert_eq!(profile.model.as_deref(), Some("gpt-5-high-new"));
+        assert_eq!(profile.model_reasoning_effort, Some(ReasoningEffort::Low));
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn persist_model_selection_updates_existing_profile() -> anyhow::Result<()> {
+        let codex_home = TempDir::new()?;
+        let config_path = codex_home.path().join(CONFIG_TOML_FILE);
+
+        tokio::fs::write(
+            &config_path,
+            r#"
+[profiles.dev]
+model = "gpt-4"
+model_reasoning_effort = "medium"
+
+[profiles.prod]
+model = "gpt-5"
+"#,
+        )
+        .await?;
+
+        persist_model_selection(
+            codex_home.path(),
+            Some("dev"),
+            "o4-high",
+            Some(ReasoningEffort::Medium),
+        )
+        .await?;
+
+        let serialized = tokio::fs::read_to_string(config_path).await?;
+        let parsed: ConfigToml = toml::from_str(&serialized)?;
+
+        let dev_profile = parsed
+            .profiles
+            .get("dev")
+            .expect("dev profile should survive updates");
+        assert_eq!(dev_profile.model.as_deref(), Some("o4-high"));
+        assert_eq!(
+            dev_profile.model_reasoning_effort,
+            Some(ReasoningEffort::Medium)
+        );
+
+        assert_eq!(
+            parsed
+                .profiles
+                .get("prod")
+                .and_then(|profile| profile.model.as_deref()),
+            Some("gpt-5"),
+        );
+
+        Ok(())
+    }
+
     struct PrecedenceTestFixture {
         cwd: TempDir,
         codex_home: TempDir,
```
@@ -1204,7 +1435,6 @@ model_verbosity = "high"
|
||||
codex_home: fixture.codex_home(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
tui: Tui::default(),
|
||||
codex_linux_sandbox_exe: None,
|
||||
hide_agent_reasoning: false,
|
||||
show_raw_agent_reasoning: false,
|
||||
@@ -1217,9 +1447,8 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("o3".to_string()),
|
||||
disable_paste_burst: false,
|
||||
@@ -1262,7 +1491,6 @@ model_verbosity = "high"
|
||||
codex_home: fixture.codex_home(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
tui: Tui::default(),
|
||||
codex_linux_sandbox_exe: None,
|
||||
hide_agent_reasoning: false,
|
||||
show_raw_agent_reasoning: false,
|
||||
@@ -1275,9 +1503,8 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("gpt3".to_string()),
|
||||
disable_paste_burst: false,
|
||||
@@ -1335,7 +1562,6 @@ model_verbosity = "high"
|
||||
codex_home: fixture.codex_home(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
tui: Tui::default(),
|
||||
codex_linux_sandbox_exe: None,
|
||||
hide_agent_reasoning: false,
|
||||
show_raw_agent_reasoning: false,
|
||||
@@ -1348,9 +1574,8 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("zdr".to_string()),
|
||||
disable_paste_burst: false,
|
||||
@@ -1394,7 +1619,6 @@ model_verbosity = "high"
|
||||
codex_home: fixture.codex_home(),
|
||||
history: History::default(),
|
||||
file_opener: UriBasedFileOpener::VsCode,
|
||||
tui: Tui::default(),
|
||||
codex_linux_sandbox_exe: None,
|
||||
hide_agent_reasoning: false,
|
||||
show_raw_agent_reasoning: false,
|
||||
@@ -1407,9 +1631,8 @@ model_verbosity = "high"
|
||||
include_plan_tool: false,
|
||||
include_apply_patch_tool: false,
|
||||
tools_web_search_request: false,
|
||||
preferred_auth_method: AuthMode::ChatGPT,
|
||||
use_experimental_streamable_shell_tool: false,
|
||||
use_experimental_unified_exec_tool: true,
|
||||
use_experimental_unified_exec_tool: false,
|
||||
include_view_image_tool: true,
|
||||
active_profile: Some("gpt5".to_string()),
|
||||
disable_paste_burst: false,
|
||||
|
||||
```diff
@@ -1,3 +1,4 @@
+use codex_protocol::models::ContentItem;
 use codex_protocol::models::ResponseItem;
 
 /// Transcript of conversation history
```
```diff
@@ -59,6 +60,26 @@ impl ConversationHistory {
         kept.reverse();
         self.items = kept;
     }
+
+    pub(crate) fn last_agent_message(&self) -> String {
+        for item in self.items.iter().rev() {
+            if let ResponseItem::Message { role, content, .. } = item
+                && role == "assistant"
+            {
+                return content
+                    .iter()
+                    .find_map(|ci| {
+                        if let ContentItem::OutputText { text } = ci {
+                            Some(text.clone())
+                        } else {
+                            None
+                        }
+                    })
+                    .unwrap_or_default();
+            }
+        }
+        String::new()
+    }
 }
 
 /// Anything that is not a system message or "reasoning" message is considered
```
```diff
@@ -63,7 +63,7 @@ impl EnvironmentContext {
                 if writable_roots.is_empty() {
                     None
                 } else {
-                    Some(writable_roots.clone())
+                    Some(writable_roots)
                 }
             }
             _ => None,
```
```diff
@@ -1,3 +1,5 @@
+use crate::token_data::KnownPlan;
+use crate::token_data::PlanType;
 use codex_protocol::mcp_protocol::ConversationId;
 use reqwest::StatusCode;
 use serde_json;
```
```diff
@@ -127,38 +129,58 @@ pub enum CodexErr {
 
 #[derive(Debug)]
 pub struct UsageLimitReachedError {
-    pub plan_type: Option<String>,
-    pub resets_in_seconds: Option<u64>,
+    pub(crate) plan_type: Option<PlanType>,
+    pub(crate) resets_in_seconds: Option<u64>,
 }
 
 impl std::fmt::Display for UsageLimitReachedError {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        // Base message differs slightly for legacy ChatGPT Plus plan users.
-        if let Some(plan_type) = &self.plan_type
-            && plan_type == "plus"
-        {
-            write!(
-                f,
-                "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing) or try again"
-            )?;
-            if let Some(secs) = self.resets_in_seconds {
-                let reset_duration = format_reset_duration(secs);
-                write!(f, " in {reset_duration}.")?;
-            } else {
-                write!(f, " later.")?;
-            }
-        } else {
-            write!(f, "You've hit your usage limit.")?;
-            if let Some(secs) = self.resets_in_seconds {
-                let reset_duration = format_reset_duration(secs);
-                write!(f, " Try again in {reset_duration}.")?;
-            } else {
-                write!(f, " Try again later.")?;
-            }
-        }
-
-        Ok(())
+        let message = match self.plan_type.as_ref() {
+            Some(PlanType::Known(KnownPlan::Plus)) => format!(
+                "You've hit your usage limit. Upgrade to Pro (https://openai.com/chatgpt/pricing){}",
+                retry_suffix_after_or(self.resets_in_seconds)
+            ),
+            Some(PlanType::Known(KnownPlan::Team)) | Some(PlanType::Known(KnownPlan::Business)) => {
+                format!(
+                    "You've hit your usage limit. To get more access now, send a request to your admin{}",
+                    retry_suffix_after_or(self.resets_in_seconds)
+                )
+            }
+            Some(PlanType::Known(KnownPlan::Free)) => {
+                "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
+                    .to_string()
+            }
+            Some(PlanType::Known(KnownPlan::Pro))
+            | Some(PlanType::Known(KnownPlan::Enterprise))
+            | Some(PlanType::Known(KnownPlan::Edu)) => format!(
+                "You've hit your usage limit.{}",
+                retry_suffix(self.resets_in_seconds)
+            ),
+            Some(PlanType::Unknown(_)) | None => format!(
+                "You've hit your usage limit.{}",
+                retry_suffix(self.resets_in_seconds)
+            ),
+        };
+
+        write!(f, "{message}")
     }
 }
 
+fn retry_suffix(resets_in_seconds: Option<u64>) -> String {
+    if let Some(secs) = resets_in_seconds {
+        let reset_duration = format_reset_duration(secs);
+        format!(" Try again in {reset_duration}.")
+    } else {
+        " Try again later.".to_string()
+    }
+}
+
+fn retry_suffix_after_or(resets_in_seconds: Option<u64>) -> String {
+    if let Some(secs) = resets_in_seconds {
+        let reset_duration = format_reset_duration(secs);
+        format!(" or try again in {reset_duration}.")
+    } else {
+        " or try again later.".to_string()
+    }
+}
```
```diff
@@ -237,7 +259,7 @@ mod tests {
     #[test]
     fn usage_limit_reached_error_formats_plus_plan() {
         let err = UsageLimitReachedError {
-            plan_type: Some("plus".to_string()),
+            plan_type: Some(PlanType::Known(KnownPlan::Plus)),
             resets_in_seconds: None,
         };
         assert_eq!(
```
```diff
@@ -246,6 +268,18 @@ mod tests {
         );
     }
 
+    #[test]
+    fn usage_limit_reached_error_formats_free_plan() {
+        let err = UsageLimitReachedError {
+            plan_type: Some(PlanType::Known(KnownPlan::Free)),
+            resets_in_seconds: Some(3600),
+        };
+        assert_eq!(
+            err.to_string(),
+            "To use Codex with your ChatGPT plan, upgrade to Plus: https://openai.com/chatgpt/pricing."
+        );
+    }
+
     #[test]
     fn usage_limit_reached_error_formats_default_when_none() {
         let err = UsageLimitReachedError {
```
```diff
@@ -258,10 +292,34 @@ mod tests {
         );
     }
 
+    #[test]
+    fn usage_limit_reached_error_formats_team_plan() {
+        let err = UsageLimitReachedError {
+            plan_type: Some(PlanType::Known(KnownPlan::Team)),
+            resets_in_seconds: Some(3600),
+        };
+        assert_eq!(
+            err.to_string(),
+            "You've hit your usage limit. To get more access now, send a request to your admin or try again in 1 hour."
+        );
+    }
+
+    #[test]
+    fn usage_limit_reached_error_formats_business_plan_without_reset() {
+        let err = UsageLimitReachedError {
+            plan_type: Some(PlanType::Known(KnownPlan::Business)),
+            resets_in_seconds: None,
+        };
+        assert_eq!(
+            err.to_string(),
+            "You've hit your usage limit. To get more access now, send a request to your admin or try again later."
+        );
+    }
+
     #[test]
     fn usage_limit_reached_error_formats_default_for_other_plans() {
         let err = UsageLimitReachedError {
-            plan_type: Some("pro".to_string()),
+            plan_type: Some(PlanType::Known(KnownPlan::Pro)),
             resets_in_seconds: None,
         };
         assert_eq!(
```
```diff
@@ -285,7 +343,7 @@ mod tests {
     #[test]
     fn usage_limit_reached_includes_hours_and_minutes() {
         let err = UsageLimitReachedError {
-            plan_type: Some("plus".to_string()),
+            plan_type: Some(PlanType::Known(KnownPlan::Plus)),
             resets_in_seconds: Some(3 * 3600 + 32 * 60),
         };
         assert_eq!(
```
```diff
@@ -159,7 +159,7 @@ mod tests {
             EventMsg::UserMessage(user) => {
                 assert_eq!(user.message, "Hello world");
                 assert!(matches!(user.kind, Some(InputMessageKind::Plain)));
-                assert_eq!(user.images, Some(vec![img1.clone(), img2.clone()]));
+                assert_eq!(user.images, Some(vec![img1, img2]));
             }
             other => panic!("expected UserMessage, got {other:?}"),
         }
```
```diff
@@ -802,7 +802,7 @@ mod tests {
     async fn resolve_root_git_project_for_trust_regular_repo_returns_repo_root() {
         let temp_dir = TempDir::new().expect("Failed to create temp dir");
         let repo_path = create_test_git_repo(&temp_dir).await;
-        let expected = std::fs::canonicalize(&repo_path).unwrap().to_path_buf();
+        let expected = std::fs::canonicalize(&repo_path).unwrap();
 
         assert_eq!(
             resolve_root_git_project_for_trust(&repo_path),
```
@@ -810,10 +810,7 @@ mod tests {
|
||||
);
|
||||
let nested = repo_path.join("sub/dir");
|
||||
std::fs::create_dir_all(&nested).unwrap();
|
||||
assert_eq!(
|
||||
resolve_root_git_project_for_trust(&nested),
|
||||
Some(expected.clone())
|
||||
);
|
||||
assert_eq!(resolve_root_git_project_for_trust(&nested), Some(expected));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
68
codex-rs/core/src/internal_storage.rs
Normal file
@@ -0,0 +1,68 @@
+use anyhow::Context;
+use serde::Deserialize;
+use serde::Serialize;
+use std::path::Path;
+use std::path::PathBuf;
+
+pub(crate) const INTERNAL_STORAGE_FILE: &str = "internal_storage.json";
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct InternalStorage {
+    #[serde(skip)]
+    storage_path: PathBuf,
+    #[serde(default)]
+    pub gpt_5_high_model_prompt_seen: bool,
+}
+
+// TODO(jif) generalise all the file writers and build proper async channel inserters.
+impl InternalStorage {
+    pub fn load(codex_home: &Path) -> Self {
+        let storage_path = codex_home.join(INTERNAL_STORAGE_FILE);
+
+        match std::fs::read_to_string(&storage_path) {
+            Ok(serialized) => match serde_json::from_str::<Self>(&serialized) {
+                Ok(mut storage) => {
+                    storage.storage_path = storage_path;
+                    storage
+                }
+                Err(error) => {
+                    tracing::warn!("failed to parse internal storage: {error:?}");
+                    Self::empty(storage_path)
+                }
+            },
+            Err(error) => {
+                tracing::warn!("failed to read internal storage: {error:?}");
+                Self::empty(storage_path)
+            }
+        }
+    }
+
+    fn empty(storage_path: PathBuf) -> Self {
+        Self {
+            storage_path,
+            ..Default::default()
+        }
+    }
+
+    pub async fn persist(&self) -> anyhow::Result<()> {
+        let serialized = serde_json::to_string_pretty(self)?;
+
+        if let Some(parent) = self.storage_path.parent() {
+            tokio::fs::create_dir_all(parent).await.with_context(|| {
+                format!(
+                    "failed to create internal storage directory at {}",
+                    parent.display()
+                )
+            })?;
+        }
+
+        tokio::fs::write(&self.storage_path, serialized)
+            .await
+            .with_context(|| {
+                format!(
+                    "failed to persist internal storage at {}",
+                    self.storage_path.display()
+                )
+            })
+    }
+}
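
As a reading aid (not part of the diff): a minimal sketch of how the new InternalStorage is meant to be used, assuming only what the file above defines. `load` swallows read/parse errors and falls back to an empty store, and `persist` writes pretty-printed JSON after creating the parent directory. The caller name `mark_prompt_seen` is hypothetical.

use std::path::Path;

// Hypothetical caller; not a name from the diff.
async fn mark_prompt_seen(codex_home: &Path) -> anyhow::Result<()> {
    // Load never fails: errors are logged via tracing and an empty storage is returned.
    let mut storage = codex_core::internal_storage::InternalStorage::load(codex_home);
    storage.gpt_5_high_model_prompt_seen = true;
    // Persist creates the parent directory if needed, then writes internal_storage.json.
    storage.persist().await
}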
@@ -28,6 +28,7 @@ mod exec_command;
 pub mod exec_env;
 mod flags;
 pub mod git_info;
+pub mod internal_storage;
 mod is_safe_command;
 pub mod landlock;
 mod mcp_connection_manager;
@@ -74,6 +75,7 @@ pub use rollout::list::ConversationsPage;
 pub use rollout::list::Cursor;
 mod user_notification;
 pub mod util;

 pub use apply_patch::CODEX_APPLY_PATCH_ARG1;
+pub use safety::get_platform_sandbox;
 // Re-export the protocol types from the standalone `codex-protocol` crate so existing
@@ -80,7 +80,10 @@ pub struct ModelProviderInfo {
     /// the connection as lost.
     pub stream_idle_timeout_ms: Option<u64>,

-    /// Whether this provider requires some form of standard authentication (API key, ChatGPT token).
+    /// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
+    /// user is presented with login screen on first run, and login preference and token/key
+    /// are stored in auth.json. If false (which is the default), login screen is skipped,
+    /// and API key (if needed) comes from the "env_key" environment variable.
     #[serde(default)]
     pub requires_openai_auth: bool,
 }
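
The reworded doc comment implies a custom provider can opt out of the OpenAI login flow entirely; a hedged sketch of such a provider, reusing the struct-update pattern the tests elsewhere in this diff use (`built_in_model_providers` comes from the same crate):

// Sketch: clone the built-in OpenAI provider but skip the login screen;
// the API key, if needed, is then read from the provider's env_key variable.
let provider = ModelProviderInfo {
    requires_openai_auth: false,
    ..built_in_model_providers()["openai"].clone()
};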
@@ -78,7 +78,7 @@ pub(crate) fn get_model_info(model_family: &ModelFamily) -> Option<ModelInfo> {
             max_output_tokens: 4_096,
         }),

-        "gpt-5" => Some(ModelInfo {
+        _ if slug.starts_with("gpt-5") => Some(ModelInfo {
             context_window: 272_000,
             max_output_tokens: 128_000,
         }),
@@ -8,6 +8,7 @@ use std::collections::HashMap;
 use crate::model_family::ModelFamily;
 use crate::plan_tool::PLAN_TOOL;
 use crate::protocol::AskForApproval;
+use crate::protocol::SandboxPolicy;
 use crate::tool_apply_patch::ApplyPatchToolType;
 use crate::tool_apply_patch::create_apply_patch_freeform_tool;
 use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -57,7 +58,7 @@ pub(crate) enum OpenAiTool {
 #[derive(Debug, Clone)]
 pub enum ConfigShellToolType {
     DefaultShell,
-    ShellWithRequest,
+    ShellWithRequest { sandbox_policy: SandboxPolicy },
     LocalShell,
     StreamableShell,
 }
@@ -75,6 +76,7 @@ pub(crate) struct ToolsConfig {
 pub(crate) struct ToolsConfigParams<'a> {
     pub(crate) model_family: &'a ModelFamily,
     pub(crate) approval_policy: AskForApproval,
+    pub(crate) sandbox_policy: SandboxPolicy,
     pub(crate) include_plan_tool: bool,
     pub(crate) include_apply_patch_tool: bool,
     pub(crate) include_web_search_request: bool,
@@ -88,6 +90,7 @@ impl ToolsConfig {
         let ToolsConfigParams {
             model_family,
             approval_policy,
+            sandbox_policy,
             include_plan_tool,
             include_apply_patch_tool,
             include_web_search_request,
@@ -103,7 +106,9 @@ impl ToolsConfig {
             ConfigShellToolType::DefaultShell
         };
         if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
-            shell_type = ConfigShellToolType::ShellWithRequest;
+            shell_type = ConfigShellToolType::ShellWithRequest {
+                sandbox_policy: sandbox_policy.clone(),
+            }
         }

         let apply_patch_tool_type = match model_family.apply_patch_tool_type {
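
Because `ShellWithRequest` now carries its `SandboxPolicy` payload, downstream code can match on the variant and get the policy with no extra plumbing; a minimal sketch (mirroring the `get_openai_tools` arm later in this diff; `shell_tool_for` is a hypothetical helper):

// Sketch: the policy travels inside the enum variant.
fn shell_tool_for(shell_type: &ConfigShellToolType) -> Option<OpenAiTool> {
    match shell_type {
        ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
            Some(create_shell_tool_for_sandbox(sandbox_policy))
        }
        // Other variants are handled as in get_openai_tools.
        _ => None,
    }
}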
@@ -246,9 +251,7 @@ fn create_unified_exec_tool() -> OpenAiTool {
     })
 }

-const SHELL_TOOL_DESCRIPTION: &str = r#"Runs a shell command and returns its output"#;
-
-fn create_shell_tool_for_sandbox() -> OpenAiTool {
+fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
     let mut properties = BTreeMap::new();
     properties.insert(
         "command".to_string(),
@@ -260,29 +263,79 @@ fn create_shell_tool_for_sandbox() -> OpenAiTool {
     properties.insert(
         "workdir".to_string(),
         JsonSchema::String {
-            description: Some("Working directory to execute the command in.".to_string()),
+            description: Some("The working directory to execute the command in".to_string()),
         },
     );
     properties.insert(
         "timeout_ms".to_string(),
         JsonSchema::Number {
-            description: Some("Timeout for the command in milliseconds.".to_string()),
-        },
-    );
-    properties.insert(
-        "with_escalated_permissions".to_string(),
-        JsonSchema::Boolean {
-            description: Some("Request escalated permissions, only for when a command would otherwise be blocked by the sandbox.".to_string()),
-        },
-    );
-    properties.insert(
-        "justification".to_string(),
-        JsonSchema::String {
-            description: Some("Required if and only if with_escalated_permissions == true. One sentence explaining why escalation is needed (e.g., write outside CWD, network fetch, git commit).".to_string()),
+            description: Some("The timeout for the command in milliseconds".to_string()),
         },
     );

-    let description = SHELL_TOOL_DESCRIPTION.to_string();
+    if matches!(sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }) {
+        properties.insert(
+            "with_escalated_permissions".to_string(),
+            JsonSchema::Boolean {
+                description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
+            },
+        );
+        properties.insert(
+            "justification".to_string(),
+            JsonSchema::String {
+                description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
+            },
+        );
+    }
+
+    let description = match sandbox_policy {
+        SandboxPolicy::WorkspaceWrite {
+            network_access,
+            ..
+        } => {
+            let network_line = if !network_access {
+                "\n - Commands that require network access"
+            } else {
+                ""
+            };
+
+            format!(
+                r#"
+The shell tool is used to execute shell commands.
+- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
+- Types of actions that require escalated privileges:
+- Writing files other than those in the writable roots (see the environment context for the allowed directories){network_line}
+- Examples of commands that require escalated privileges:
+- git commit
+- npm install or pnpm install
+- cargo build
+- cargo test
+- When invoking a command that will require escalated privileges:
+- Provide the with_escalated_permissions parameter with the boolean value true
+- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#,
+            )
+        }
+        SandboxPolicy::DangerFullAccess => {
+            "Runs a shell command and returns its output.".to_string()
+        }
+        SandboxPolicy::ReadOnly => {
+            r#"
+The shell tool is used to execute shell commands.
+- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
+- Types of actions that require escalated privileges:
+- Writing files
+- Applying patches
+- Examples of commands that require escalated privileges:
+- apply_patch
+- git commit
+- npm install or pnpm install
+- cargo build
+- cargo test
+- When invoking a command that will require escalated privileges:
+- Provide the with_escalated_permissions parameter with the boolean value true
+- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#.to_string()
+        }
+    };

     OpenAiTool::Function(ResponsesApiTool {
         name: "shell".to_string(),
@@ -295,6 +348,7 @@ fn create_shell_tool_for_sandbox() -> OpenAiTool {
         },
     })
 }

 fn create_view_image_tool() -> OpenAiTool {
     // Support only local filesystem path.
     let mut properties = BTreeMap::new();
@@ -535,8 +589,8 @@ pub(crate) fn get_openai_tools(
         ConfigShellToolType::DefaultShell => {
             tools.push(create_shell_tool());
         }
-        ConfigShellToolType::ShellWithRequest => {
-            tools.push(create_shell_tool_for_sandbox());
+        ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
+            tools.push(create_shell_tool_for_sandbox(sandbox_policy));
         }
         ConfigShellToolType::LocalShell => {
             tools.push(OpenAiTool::LocalShell {});
@@ -632,6 +686,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: true,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -653,6 +708,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: true,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -674,6 +730,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -779,6 +836,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: false,
@@ -856,6 +914,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -918,6 +977,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -975,6 +1035,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -1035,6 +1096,7 @@ mod tests {
         let config = ToolsConfig::new(&ToolsConfigParams {
             model_family: &model_family,
             approval_policy: AskForApproval::Never,
+            sandbox_policy: SandboxPolicy::ReadOnly,
             include_plan_tool: false,
             include_apply_patch_tool: false,
             include_web_search_request: true,
@@ -1088,7 +1150,13 @@ mod tests {

     #[test]
     fn test_shell_tool_for_sandbox_workspace_write() {
-        let tool = super::create_shell_tool_for_sandbox();
+        let sandbox_policy = SandboxPolicy::WorkspaceWrite {
+            writable_roots: vec!["workspace".into()],
+            network_access: false,
+            exclude_tmpdir_env_var: false,
+            exclude_slash_tmp: false,
+        };
+        let tool = super::create_shell_tool_for_sandbox(&sandbox_policy);
         let OpenAiTool::Function(ResponsesApiTool {
             description, name, ..
         }) = &tool
@@ -1097,13 +1165,26 @@ mod tests {
         };
         assert_eq!(name, "shell");

-        let expected = super::SHELL_TOOL_DESCRIPTION;
+        let expected = r#"
+The shell tool is used to execute shell commands.
+- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
+- Types of actions that require escalated privileges:
+- Writing files other than those in the writable roots (see the environment context for the allowed directories)
+- Commands that require network access
+- Examples of commands that require escalated privileges:
+- git commit
+- npm install or pnpm install
+- cargo build
+- cargo test
+- When invoking a command that will require escalated privileges:
+- Provide the with_escalated_permissions parameter with the boolean value true
+- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#;
         assert_eq!(description, expected);
     }

     #[test]
     fn test_shell_tool_for_sandbox_readonly() {
-        let tool = super::create_shell_tool_for_sandbox();
+        let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::ReadOnly);
         let OpenAiTool::Function(ResponsesApiTool {
             description, name, ..
         }) = &tool
@@ -1112,13 +1193,27 @@ mod tests {
         };
         assert_eq!(name, "shell");

-        let expected = super::SHELL_TOOL_DESCRIPTION;
+        let expected = r#"
+The shell tool is used to execute shell commands.
+- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
+- Types of actions that require escalated privileges:
+- Writing files
+- Applying patches
+- Examples of commands that require escalated privileges:
+- apply_patch
+- git commit
+- npm install or pnpm install
+- cargo build
+- cargo test
+- When invoking a command that will require escalated privileges:
+- Provide the with_escalated_permissions parameter with the boolean value true
+- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#;
         assert_eq!(description, expected);
     }

     #[test]
     fn test_shell_tool_for_sandbox_danger_full_access() {
-        let tool = super::create_shell_tool_for_sandbox();
+        let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::DangerFullAccess);
         let OpenAiTool::Function(ResponsesApiTool {
             description, name, ..
         }) = &tool
@@ -1127,7 +1222,6 @@ mod tests {
         };
         assert_eq!(name, "shell");

-        let expected = super::SHELL_TOOL_DESCRIPTION;
-        assert_eq!(description, expected);
+        assert_eq!(description, "Runs a shell command and returns its output.");
     }
 }
@@ -868,7 +868,7 @@ pub fn parse_command_impl(command: &[String]) -> Vec<ParsedCommand> {
     let parts = if contains_connectors(&normalized) {
         split_on_connectors(&normalized)
     } else {
-        vec![normalized.clone()]
+        vec![normalized]
     };

     // Preserve left-to-right execution order for all commands, including bash -c/-lc
@@ -1201,10 +1201,7 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
                         name,
                     }
                 } else {
-                    ParsedCommand::Read {
-                        cmd: cmd.clone(),
-                        name,
-                    }
+                    ParsedCommand::Read { cmd, name }
                 }
             } else {
                 ParsedCommand::Read {
@@ -1215,10 +1212,7 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
             }
             ParsedCommand::ListFiles { path, cmd, .. } => {
                 if had_connectors {
-                    ParsedCommand::ListFiles {
-                        cmd: cmd.clone(),
-                        path,
-                    }
+                    ParsedCommand::ListFiles { cmd, path }
                 } else {
                     ParsedCommand::ListFiles {
                         cmd: shlex_join(&script_tokens),
@@ -1230,11 +1224,7 @@ fn parse_bash_lc_commands(original: &[String]) -> Option<Vec<ParsedCommand>> {
                 query, path, cmd, ..
             } => {
                 if had_connectors {
-                    ParsedCommand::Search {
-                        cmd: cmd.clone(),
-                        query,
-                        path,
-                    }
+                    ParsedCommand::Search { cmd, query, path }
                 } else {
                     ParsedCommand::Search {
                         cmd: shlex_join(&script_tokens),
@@ -115,7 +115,7 @@ pub fn discover_project_doc_paths(config: &Config) -> std::io::Result<Vec<PathBu
     // Build chain from cwd upwards and detect git root.
     let mut chain: Vec<PathBuf> = vec![dir.clone()];
     let mut git_root: Option<PathBuf> = None;
-    let mut cursor = dir.clone();
+    let mut cursor = dir;
     while let Some(parent) = cursor.parent() {
         let git_marker = cursor.join(".git");
         let git_exists = match std::fs::metadata(&git_marker) {
@@ -318,6 +318,12 @@ async fn read_head_and_flags(
                     head.push(val);
                 }
             }
+            RolloutItem::TurnContext(_) => {
+                // Not included in `head`; skip.
+            }
+            RolloutItem::Compacted(_) => {
+                // Not included in `head`; skip.
+            }
             RolloutItem::EventMsg(ev) => {
                 if matches!(ev, EventMsg::UserMessage(_)) {
                     saw_user_event = true;

@@ -8,8 +8,10 @@ pub(crate) fn is_persisted_response_item(item: &RolloutItem) -> bool {
     match item {
         RolloutItem::ResponseItem(item) => should_persist_response_item(item),
         RolloutItem::EventMsg(ev) => should_persist_event_msg(ev),
-        // Always persist session meta
-        RolloutItem::SessionMeta(_) => true,
+        // Persist Codex executive markers so we can analyze flows (e.g., compaction, API turns).
+        RolloutItem::Compacted(_) | RolloutItem::TurnContext(_) | RolloutItem::SessionMeta(_) => {
+            true
+        }
     }
 }
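
The widened arm means compaction and turn-context markers now survive persistence alongside session meta; a small sketch of the predicate applied to a batch (`keep_persisted` is hypothetical, and this assumes crate-internal visibility, since `is_persisted_response_item` is `pub(crate)`):

// Sketch: retain exactly the rollout items the new predicate accepts.
fn keep_persisted(items: Vec<RolloutItem>) -> Vec<RolloutItem> {
    items
        .into_iter()
        .filter(is_persisted_response_item)
        .collect()
}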
@@ -238,6 +238,12 @@ impl RolloutRecorder {
                 RolloutItem::ResponseItem(item) => {
                     items.push(RolloutItem::ResponseItem(item));
                 }
+                RolloutItem::Compacted(item) => {
+                    items.push(RolloutItem::Compacted(item));
+                }
+                RolloutItem::TurnContext(item) => {
+                    items.push(RolloutItem::TurnContext(item));
+                }
                 RolloutItem::EventMsg(_ev) => {
                     items.push(RolloutItem::EventMsg(_ev));
                 }
@@ -305,7 +305,7 @@ async fn test_pagination_cursor() {
             path: p1,
             head: head_1,
         }],
-        next_cursor: Some(expected_cursor3.clone()),
+        next_cursor: Some(expected_cursor3),
         num_scanned_files: 5, // scanned 05, 04 (anchor), 03, 02 (anchor), 01
         reached_scan_cap: false,
     };
@@ -344,7 +344,7 @@ async fn test_get_conversation_contents() {
     let expected_cursor: Cursor = serde_json::from_str(&format!("\"{ts}|{uuid}\"")).unwrap();
     let expected_page = ConversationsPage {
         items: vec![ConversationItem {
-            path: expected_path.clone(),
+            path: expected_path,
             head: expected_head,
         }],
         next_cursor: Some(expected_cursor),
@@ -437,7 +437,7 @@ async fn test_stable_ordering_same_second_pagination() {
             path: p1,
             head: head(u1),
         }],
-        next_cursor: Some(expected_cursor2.clone()),
+        next_cursor: Some(expected_cursor2),
         num_scanned_files: 3, // scanned u3, u2 (anchor), u1
         reached_scan_cap: false,
     };
@@ -293,7 +293,7 @@ mod tests {
         // With the parent dir explicitly added as a writable root, the
         // outside write should be permitted.
         let policy_with_parent = SandboxPolicy::WorkspaceWrite {
-            writable_roots: vec![parent.clone()],
+            writable_roots: vec![parent],
             network_access: false,
             exclude_tmpdir_env_var: true,
             exclude_slash_tmp: true,

@@ -153,7 +153,7 @@ mod tests {
         // Build a policy that only includes the two test roots as writable and
         // does not automatically include defaults TMPDIR or /tmp.
         let policy = SandboxPolicy::WorkspaceWrite {
-            writable_roots: vec![root_with_git.clone(), root_without_git.clone()],
+            writable_roots: vec![root_with_git, root_without_git],
             network_access: false,
             exclude_tmpdir_env_var: true,
             exclude_slash_tmp: true,
@@ -326,10 +326,7 @@ mod tests {
             .format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
         let expected_cmd = expected_cmd
             .iter()
-            .map(|s| {
-                s.replace("BASHRC_PATH", bashrc_path.to_str().unwrap())
-                    .to_string()
-            })
+            .map(|s| s.replace("BASHRC_PATH", bashrc_path.to_str().unwrap()))
             .collect();

         assert_eq!(actual_cmd, Some(expected_cmd));
@@ -435,10 +432,7 @@ mod macos_tests {
             .format_default_shell_invocation(input.iter().map(|s| s.to_string()).collect());
         let expected_cmd = expected_cmd
             .iter()
-            .map(|s| {
-                s.replace("ZSHRC_PATH", zshrc_path.to_str().unwrap())
-                    .to_string()
-            })
+            .map(|s| s.replace("ZSHRC_PATH", zshrc_path.to_str().unwrap()))
             .collect();

         assert_eq!(actual_cmd, Some(expected_cmd));
@@ -3,8 +3,6 @@ use serde::Deserialize;
 use serde::Serialize;
 use thiserror::Error;

-use codex_protocol::mcp_protocol::AuthMode;
-
 #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Default)]
 pub struct TokenData {
     /// Flat info parsed from the JWT in auth.json.
@@ -22,36 +20,6 @@ pub struct TokenData {
     pub account_id: Option<String>,
 }

-impl TokenData {
-    /// Returns true if this is a plan that should use the traditional
-    /// "metered" billing via an API key.
-    pub(crate) fn should_use_api_key(
-        &self,
-        preferred_auth_method: AuthMode,
-        is_openai_email: bool,
-    ) -> bool {
-        if preferred_auth_method == AuthMode::ApiKey {
-            return true;
-        }
-        // If the email is an OpenAI email, use AuthMode::ChatGPT unless preferred_auth_method is AuthMode::ApiKey.
-        if is_openai_email {
-            return false;
-        }
-
-        self.id_token
-            .chatgpt_plan_type
-            .as_ref()
-            .is_none_or(|plan| plan.is_plan_that_should_use_api_key())
-    }
-
-    pub fn is_openai_email(&self) -> bool {
-        self.id_token
-            .email
-            .as_deref()
-            .is_some_and(|email| email.trim().to_ascii_lowercase().ends_with("@openai.com"))
-    }
-}
-
 /// Flat subset of useful claims in id_token from auth.json.
 #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct IdTokenInfo {
@@ -79,28 +47,6 @@ pub(crate) enum PlanType {
     Unknown(String),
 }

-impl PlanType {
-    fn is_plan_that_should_use_api_key(&self) -> bool {
-        match self {
-            Self::Known(known) => {
-                use KnownPlan::*;
-                !matches!(known, Free | Plus | Pro | Team)
-            }
-            Self::Unknown(_) => {
-                // Unknown plans should use the API key.
-                true
-            }
-        }
-    }
-
-    pub fn as_string(&self) -> String {
-        match self {
-            Self::Known(known) => format!("{known:?}").to_lowercase(),
-            Self::Unknown(s) => s.clone(),
-        }
-    }
-}
-
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
 pub(crate) enum KnownPlan {
@@ -678,7 +678,7 @@ index {left_oid}..{right_oid}
     let dest = dir.path().join("dest.txt");
     let mut acc = TurnDiffTracker::new();
     let mv = HashMap::from([(
-        src.clone(),
+        src,
         FileChange::Update {
             unified_diff: "".into(),
             move_path: Some(dest.clone()),
@@ -586,26 +586,6 @@ mod tests {
         Ok(())
     }

-    #[cfg(unix)]
-    #[tokio::test]
-    async fn correct_path_resolution() -> Result<(), UnifiedExecError> {
-        let manager = UnifiedExecSessionManager::default();
-        let result = manager
-            .handle_request(UnifiedExecRequest {
-                session_id: None,
-                input_chunks: &["echo".to_string(), "codex".to_string()],
-                timeout_ms: Some(1_500),
-            })
-            .await?;
-
-        assert!(result.session_id.is_none());
-        assert!(result.output.contains("codex"));
-
-        assert!(manager.sessions.lock().await.is_empty());
-
-        Ok(())
-    }
-
     #[cfg(unix)]
     #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
     async fn reusing_completed_session_returns_unknown_session() -> Result<(), UnifiedExecError> {
@@ -8,7 +8,6 @@ use codex_core::protocol::EventMsg;
 use codex_core::protocol::InputItem;
 use codex_core::protocol::Op;
 use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
-use codex_protocol::mcp_protocol::AuthMode;
 use core_test_support::load_default_config_for_test;
 use core_test_support::load_sse_fixture_with_id;
 use core_test_support::wait_for_event;
@@ -277,25 +276,7 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
             "content": [{ "type": "input_text", "text": "hello" }]
         }
     ]);
-    let input_array = request_body
-        .get("input")
-        .and_then(|v| v.as_array())
-        .cloned()
-        .expect("input array in request body");
-    let filtered: Vec<serde_json::Value> = input_array
-        .into_iter()
-        .filter(|item| {
-            let text = item
-                .get("content")
-                .and_then(|c| c.as_array())
-                .and_then(|a| a.first())
-                .and_then(|o| o.get("text"))
-                .and_then(|t| t.as_str())
-                .unwrap_or("");
-            !text.contains("<environment_context>")
-        })
-        .collect();
-    assert_eq!(serde_json::json!(filtered), expected_input);
+    assert_eq!(request_body["input"], expected_input);
 }

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -507,79 +488,6 @@ async fn chatgpt_auth_sends_correct_request() {
     );
 }

-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
-    if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
-        println!(
-            "Skipping test because it cannot execute when network is disabled in a Codex sandbox."
-        );
-        return;
-    }
-
-    // Mock server
-    let server = MockServer::start().await;
-
-    let first = ResponseTemplate::new(200)
-        .insert_header("content-type", "text/event-stream")
-        .set_body_raw(sse_completed("resp1"), "text/event-stream");
-
-    // Expect ChatGPT base path and correct headers
-    Mock::given(method("POST"))
-        .and(path("/v1/responses"))
-        .and(header_regex("Authorization", r"Bearer Access-123"))
-        .and(header_regex("chatgpt-account-id", r"acc-123"))
-        .respond_with(first)
-        .expect(1)
-        .mount(&server)
-        .await;
-
-    let model_provider = ModelProviderInfo {
-        base_url: Some(format!("{}/v1", server.uri())),
-        ..built_in_model_providers()["openai"].clone()
-    };
-
-    // Init session
-    let codex_home = TempDir::new().unwrap();
-    // Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT.
-    let _jwt = write_auth_json(
-        &codex_home,
-        Some("sk-test-key"),
-        "pro",
-        "Access-123",
-        Some("acc-123"),
-    );
-
-    let mut config = load_default_config_for_test(&codex_home);
-    config.model_provider = model_provider;
-    config.preferred_auth_method = AuthMode::ChatGPT;
-
-    let auth_manager =
-        match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
-            Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
-            Ok(None) => panic!("No CodexAuth found in codex_home"),
-            Err(e) => panic!("Failed to load CodexAuth: {e}"),
-        };
-    let conversation_manager = ConversationManager::new(auth_manager);
-    let NewConversation {
-        conversation: codex,
-        ..
-    } = conversation_manager
-        .new_conversation(config)
-        .await
-        .expect("create new conversation");
-
-    codex
-        .submit(Op::UserInput {
-            items: vec![InputItem::Text {
-                text: "hello".into(),
-            }],
-        })
-        .await
-        .unwrap();
-
-    wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
-}
-
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
     if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
@@ -624,14 +532,12 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {

     let mut config = load_default_config_for_test(&codex_home);
     config.model_provider = model_provider;
-    config.preferred_auth_method = AuthMode::ApiKey;

-    let auth_manager =
-        match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
-            Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
-            Ok(None) => panic!("No CodexAuth found in codex_home"),
-            Err(e) => panic!("Failed to load CodexAuth: {e}"),
-        };
+    let auth_manager = match CodexAuth::from_codex_home(codex_home.path()) {
+        Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
+        Ok(None) => panic!("No CodexAuth found in codex_home"),
+        Err(e) => panic!("Failed to load CodexAuth: {e}"),
+    };
     let conversation_manager = ConversationManager::new(auth_manager);
     let NewConversation {
         conversation: codex,
@@ -968,6 +874,34 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
     assert_eq!(requests.len(), 3, "expected 3 requests (one per turn)");

+    // Replace full-array compare with tail-only raw JSON compare using a single hard-coded value.
+    let r3_tail_expected = json!([
+        {
+            "type": "message",
+            "role": "user",
+            "content": [{"type":"input_text","text":"U1"}]
+        },
+        {
+            "type": "message",
+            "role": "assistant",
+            "content": [{"type":"output_text","text":"Hey there!\n"}]
+        },
+        {
+            "type": "message",
+            "role": "user",
+            "content": [{"type":"input_text","text":"U2"}]
+        },
+        {
+            "type": "message",
+            "role": "assistant",
+            "content": [{"type":"output_text","text":"Hey there!\n"}]
+        },
+        {
+            "type": "message",
+            "role": "user",
+            "content": [{"type":"input_text","text":"U3"}]
+        }
+    ]);
+
     let r3_input_array = requests[2]
         .body_json::<serde_json::Value>()
         .unwrap()
@@ -975,60 +909,12 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
         .and_then(|v| v.as_array())
         .cloned()
         .expect("r3 missing input array");
-    // We only assert on the last 5 items of the input history for request 3.
-    // With per-turn environment context injected, the last 5 should be:
-    // [env_ctx, U2, assistant("Hey there!\n"), env_ctx, U3]
-    let actual_tail = &r3_input_array[r3_input_array.len() - 5..];
-
-    // env_ctx 1
-    assert_eq!(actual_tail[0]["type"], serde_json::json!("message"));
-    assert_eq!(actual_tail[0]["role"], serde_json::json!("user"));
-    let env_text_1 = &actual_tail[0]["content"][0]["text"];
-    assert!(
-        env_text_1
-            .as_str()
-            .expect("env text should be string")
-            .contains("<environment_context>")
-    );
-
-    // U2
-    assert_eq!(
-        actual_tail[1],
-        serde_json::json!({
-            "type": "message",
-            "role": "user",
-            "content": [ { "type": "input_text", "text": "U2" } ]
-        })
-    );
-
-    // assistant response
-    assert_eq!(
-        actual_tail[2],
-        serde_json::json!({
-            "type": "message",
-            "role": "assistant",
-            "content": [ { "type": "output_text", "text": "Hey there!\n" } ]
-        })
-    );
-
-    // env_ctx 2
-    assert_eq!(actual_tail[3]["type"], serde_json::json!("message"));
-    assert_eq!(actual_tail[3]["role"], serde_json::json!("user"));
-    let env_text_2 = &actual_tail[3]["content"][0]["text"];
-    assert!(
-        env_text_2
-            .as_str()
-            .expect("env text should be string")
-            .contains("<environment_context>")
-    );
-
-    // U3
-    assert_eq!(
-        actual_tail[4],
-        serde_json::json!({
-            "type": "message",
-            "role": "user",
-            "content": [ { "type": "input_text", "text": "U3" } ]
-        })
-    );
+    // skipping earlier context and developer messages
+    let tail_len = r3_tail_expected.as_array().unwrap().len();
+    let actual_tail = &r3_input_array[r3_input_array.len() - tail_len..];
+    assert_eq!(
+        serde_json::Value::Array(actual_tail.to_vec()),
+        r3_tail_expected,
+        "request 3 tail mismatch",
+    );
 }
@@ -3,10 +3,13 @@
 use codex_core::CodexAuth;
 use codex_core::ConversationManager;
 use codex_core::ModelProviderInfo;
+use codex_core::NewConversation;
 use codex_core::built_in_model_providers;
 use codex_core::protocol::EventMsg;
 use codex_core::protocol::InputItem;
 use codex_core::protocol::Op;
+use codex_core::protocol::RolloutItem;
+use codex_core::protocol::RolloutLine;
 use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
 use core_test_support::load_default_config_for_test;
 use core_test_support::wait_for_event;
@@ -142,11 +145,12 @@ async fn summarize_context_three_requests_and_instructions() {
     let mut config = load_default_config_for_test(&home);
     config.model_provider = model_provider;
     let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
-    let codex = conversation_manager
-        .new_conversation(config)
-        .await
-        .unwrap()
-        .conversation;
+    let NewConversation {
+        conversation: codex,
+        session_configured,
+        ..
+    } = conversation_manager.new_conversation(config).await.unwrap();
+    let rollout_path = session_configured.rollout_path;

     // 1) Normal user input – should hit server once.
     codex
@@ -248,4 +252,47 @@ async fn summarize_context_three_requests_and_instructions() {
         !messages.iter().any(|(_, t)| t.contains(SUMMARIZE_TRIGGER)),
         "third request should not include the summarize trigger"
     );
+
+    // Shut down Codex to flush rollout entries before inspecting the file.
+    codex.submit(Op::Shutdown).await.unwrap();
+    wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
+
+    // Verify rollout contains APITurn entries for each API call and a Compacted entry.
+    let text = std::fs::read_to_string(&rollout_path).unwrap_or_else(|e| {
+        panic!(
+            "failed to read rollout file {}: {e}",
+            rollout_path.display()
+        )
+    });
+    let mut api_turn_count = 0usize;
+    let mut saw_compacted_summary = false;
+    for line in text.lines() {
+        let trimmed = line.trim();
+        if trimmed.is_empty() {
+            continue;
+        }
+        let Ok(entry): Result<RolloutLine, _> = serde_json::from_str(trimmed) else {
+            continue;
+        };
+        match entry.item {
+            RolloutItem::TurnContext(_) => {
+                api_turn_count += 1;
+            }
+            RolloutItem::Compacted(ci) => {
+                if ci.message == SUMMARY_TEXT {
+                    saw_compacted_summary = true;
+                }
+            }
+            _ => {}
+        }
+    }
+
+    assert!(
+        api_turn_count == 3,
+        "expected three APITurn entries in rollout"
+    );
+    assert!(
+        saw_compacted_summary,
+        "expected a Compacted entry containing the summarizer output"
+    );
 }
@@ -191,8 +191,7 @@ async fn prompt_tools_are_consistent_across_requests() {
     let expected_instructions: &str = include_str!("../../prompt.md");
     // our internal implementation is responsible for keeping tools in sync
     // with the OpenAI schema, so we just verify the tool presence here
-    let expected_tools_names: &[&str] =
-        &["unified_exec", "update_plan", "apply_patch", "view_image"];
+    let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
     let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
     assert_eq!(
         body0["instructions"],
@@ -272,7 +271,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests

     let shell = default_user_shell().await;

-    let expected_env_text_init = format!(
+    let expected_env_text = format!(
         r#"<environment_context>
 <cwd>{}</cwd>
 <approval_policy>on-request</approval_policy>
@@ -285,28 +284,13 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
             None => String::new(),
         }
     );
-    // Per-turn environment context omits the shell tag.
-    let expected_env_text_turn = format!(
-        r#"<environment_context>
-<cwd>{}</cwd>
-<approval_policy>on-request</approval_policy>
-<sandbox_mode>read-only</sandbox_mode>
-<network_access>restricted</network_access>
-</environment_context>"#,
-        cwd.path().to_string_lossy(),
-    );
     let expected_ui_text =
         "<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";

-    let expected_env_msg_init = serde_json::json!({
+    let expected_env_msg = serde_json::json!({
         "type": "message",
         "role": "user",
-        "content": [ { "type": "input_text", "text": expected_env_text_init } ]
-    });
-    let expected_env_msg_turn = serde_json::json!({
-        "type": "message",
-        "role": "user",
-        "content": [ { "type": "input_text", "text": expected_env_text_turn } ]
+        "content": [ { "type": "input_text", "text": expected_env_text } ]
     });
     let expected_ui_msg = serde_json::json!({
         "type": "message",
@@ -322,12 +306,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
     let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
     assert_eq!(
         body1["input"],
-        serde_json::json!([
-            expected_ui_msg,
-            expected_env_msg_init,
-            expected_env_msg_turn,
-            expected_user_message_1
-        ])
+        serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
     );

     let expected_user_message_2 = serde_json::json!({
@@ -339,7 +318,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
     let expected_body2 = serde_json::json!(
         [
             body1["input"].as_array().unwrap().as_slice(),
-            [expected_env_msg_turn, expected_user_message_2].as_slice(),
+            [expected_user_message_2].as_slice(),
         ]
         .concat()
     );
@@ -567,24 +546,10 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
         "role": "user",
         "content": [ { "type": "input_text", "text": "hello 2" } ]
     });
-    let expected_env_text_2 = format!(
-        r#"<environment_context>
-<cwd>{}</cwd>
-<approval_policy>never</approval_policy>
-<sandbox_mode>workspace-write</sandbox_mode>
-<network_access>enabled</network_access>
-</environment_context>"#,
-        new_cwd.path().to_string_lossy()
-    );
-    let expected_env_msg_2 = serde_json::json!({
-        "type": "message",
-        "role": "user",
-        "content": [ { "type": "input_text", "text": expected_env_text_2 } ]
-    });
     let expected_body2 = serde_json::json!(
         [
             body1["input"].as_array().unwrap().as_slice(),
-            [expected_env_msg_2, expected_user_message_2].as_slice(),
+            [expected_user_message_2].as_slice(),
         ]
         .concat()
     );
@@ -280,7 +280,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
             parsed_cmd: _,
         }) => {
             self.call_id_to_command.insert(
-                call_id.clone(),
+                call_id,
                 ExecCommandBegin {
                     command: command.clone(),
                 },
@@ -382,7 +382,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
             // Store metadata so we can calculate duration later when we
             // receive the corresponding PatchApplyEnd event.
             self.call_id_to_patch.insert(
-                call_id.clone(),
+                call_id,
                 PatchApplyBegin {
                     start_time: Instant::now(),
                     auto_approved,
@@ -187,10 +187,8 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
         std::process::exit(1);
     }

-    let conversation_manager = ConversationManager::new(AuthManager::shared(
-        config.codex_home.clone(),
-        config.preferred_auth_method,
-    ));
+    let conversation_manager =
+        ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
     let NewConversation {
         conversation_id: _,
         conversation,
@@ -61,7 +61,7 @@ pub(crate) async fn run_e2e_exec_test(cwd: &Path, response_streams: Vec<String>)
-        .context("should find binary for codex-exec")
+        .expect("should find binary for codex-exec")
         .current_dir(cwd.clone())
-        .env("CODEX_HOME", cwd.clone())
+        .env("CODEX_HOME", cwd)
         .env("OPENAI_API_KEY", "dummy")
         .env("OPENAI_BASE_URL", format!("{uri}/v1"))
         .arg("--skip-git-repo-check")
@@ -88,7 +88,7 @@ impl ExecvChecker {
         let mut program = valid_exec.program.to_string();
         for system_path in valid_exec.system_path {
             if is_executable_file(&system_path) {
-                program = system_path.to_string();
+                program = system_path;
                 break;
             }
         }
@@ -196,7 +196,7 @@ system_path=[{fake_cp:?}]
         let checker = setup(&fake_cp);
         let exec_call = ExecCall {
             program: "cp".into(),
-            args: vec![source.clone(), dest.clone()],
+            args: vec![source, dest.clone()],
         };
         let valid_exec = match checker.r#match(&exec_call)? {
             MatchedExec::Match { exec } => exec,
@@ -207,7 +207,7 @@ system_path=[{fake_cp:?}]
         assert_eq!(
             checker.check(valid_exec.clone(), &cwd, &[], &[]),
             Err(ReadablePathNotInReadableFolders {
-                file: source_path.clone(),
+                file: source_path,
                 folders: vec![]
             }),
         );
@@ -229,7 +229,7 @@ system_path=[{fake_cp:?}]
         // Both readable and writeable folders specified.
         assert_eq!(
             checker.check(
-                valid_exec.clone(),
+                valid_exec,
                 &cwd,
                 std::slice::from_ref(&root_path),
                 std::slice::from_ref(&root_path)
@@ -241,7 +241,7 @@ system_path=[{fake_cp:?}]
         // folders.
         let exec_call_folders_as_args = ExecCall {
             program: "cp".into(),
-            args: vec![root.clone(), root.clone()],
+            args: vec![root.clone(), root],
         };
         let valid_exec_call_folders_as_args = match checker.r#match(&exec_call_folders_as_args)? {
             MatchedExec::Match { exec } => exec,
@@ -254,7 +254,7 @@ system_path=[{fake_cp:?}]
                 std::slice::from_ref(&root_path),
                 std::slice::from_ref(&root_path)
             ),
-            Ok(cp.clone()),
+            Ok(cp),
         );

         // Specify a parent of a readable folder as input.
@@ -104,7 +104,7 @@ impl PolicyBuilder {
         info!("adding program spec: {program_spec:?}");
         let name = program_spec.program.clone();
         let mut programs = self.programs.borrow_mut();
-        programs.insert(name.clone(), program_spec);
+        programs.insert(name, program_spec);
     }

     fn add_forbidden_substrings(&self, substrings: &[String]) {
@@ -42,7 +42,7 @@ impl ServerOptions {
     pub fn new(codex_home: PathBuf, client_id: String) -> Self {
         Self {
             codex_home,
-            client_id: client_id.to_string(),
+            client_id,
             issuer: DEFAULT_ISSUER.to_string(),
             port: DEFAULT_PORT,
             open_browser: true,
@@ -126,7 +126,7 @@ pub fn run_login_server(opts: ServerOptions) -> io::Result<LoginServer> {
     let shutdown_notify = Arc::new(tokio::sync::Notify::new());
     let server_handle = {
         let shutdown_notify = shutdown_notify.clone();
-        let server = server.clone();
+        let server = server;
         tokio::spawn(async move {
             let result = loop {
                 tokio::select! {
@@ -12,6 +12,7 @@ use codex_core::RolloutRecorder;
 use codex_core::SessionMeta;
 use codex_core::auth::CLIENT_ID;
 use codex_core::auth::get_auth_file;
+use codex_core::auth::login_with_api_key;
 use codex_core::auth::try_read_auth_json;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
@@ -39,7 +40,6 @@ use codex_protocol::mcp_protocol::ApplyPatchApprovalParams;
 use codex_protocol::mcp_protocol::ApplyPatchApprovalResponse;
 use codex_protocol::mcp_protocol::ArchiveConversationParams;
 use codex_protocol::mcp_protocol::ArchiveConversationResponse;
-use codex_protocol::mcp_protocol::AuthMode;
 use codex_protocol::mcp_protocol::AuthStatusChangeNotification;
 use codex_protocol::mcp_protocol::ClientRequest;
 use codex_protocol::mcp_protocol::ConversationId;
@@ -57,6 +57,8 @@ use codex_protocol::mcp_protocol::InterruptConversationParams;
 use codex_protocol::mcp_protocol::InterruptConversationResponse;
 use codex_protocol::mcp_protocol::ListConversationsParams;
 use codex_protocol::mcp_protocol::ListConversationsResponse;
+use codex_protocol::mcp_protocol::LoginApiKeyParams;
+use codex_protocol::mcp_protocol::LoginApiKeyResponse;
 use codex_protocol::mcp_protocol::LoginChatGptCompleteNotification;
 use codex_protocol::mcp_protocol::LoginChatGptResponse;
 use codex_protocol::mcp_protocol::NewConversationParams;
@@ -172,6 +174,9 @@ impl CodexMessageProcessor {
             ClientRequest::GitDiffToRemote { request_id, params } => {
                 self.git_diff_to_origin(request_id, params.cwd).await;
             }
+            ClientRequest::LoginApiKey { request_id, params } => {
+                self.login_api_key(request_id, params).await;
+            }
             ClientRequest::LoginChatGpt { request_id } => {
                 self.login_chatgpt(request_id).await;
             }
@@ -199,6 +204,39 @@ impl CodexMessageProcessor {
         }
     }

+    async fn login_api_key(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
+        {
+            let mut guard = self.active_login.lock().await;
+            if let Some(active) = guard.take() {
+                active.drop();
+            }
+        }
+
+        match login_with_api_key(&self.config.codex_home, &params.api_key) {
+            Ok(()) => {
+                self.auth_manager.reload();
+                self.outgoing
+                    .send_response(request_id, LoginApiKeyResponse {})
+                    .await;
+
+                let payload = AuthStatusChangeNotification {
+                    auth_method: self.auth_manager.auth().map(|auth| auth.mode),
+                };
+                self.outgoing
+                    .send_server_notification(ServerNotification::AuthStatusChange(payload))
+                    .await;
+            }
+            Err(err) => {
+                let error = JSONRPCErrorError {
+                    code: INTERNAL_ERROR_CODE,
+                    message: format!("failed to save api key: {err}"),
+                    data: None,
+                };
+                self.outgoing.send_error(request_id, error).await;
+            }
+        }
+    }
+
     async fn login_chatgpt(&mut self, request_id: RequestId) {
         let config = self.config.as_ref();
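
For orientation, the new handler answers a standard JSON-RPC request whose method name is the `loginApiKey` string registered above; a hand-written sketch of the wire shape. The exact casing of the params key depends on the serde renames in `mcp_protocol`, which this diff does not show, so treat `apiKey` as an assumption:

// Sketch only: JSON-RPC envelope for the new request.
let request = serde_json::json!({
    "jsonrpc": "2.0",
    "id": 7,
    "method": "loginApiKey",
    "params": { "apiKey": "sk-example" }
});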
@@ -352,7 +390,7 @@ impl CodexMessageProcessor {
             .await;

         // Send auth status change notification reflecting the current auth mode
-        // after logout (which may fall back to API key via env var).
+        // after logout.
         let current_auth_method = self.auth_manager.auth().map(|auth| auth.mode);
         let payload = AuthStatusChangeNotification {
             auth_method: current_auth_method,
@@ -367,7 +405,6 @@ impl CodexMessageProcessor {
         request_id: RequestId,
         params: codex_protocol::mcp_protocol::GetAuthStatusParams,
     ) {
-        let preferred_auth_method: AuthMode = self.auth_manager.preferred_auth_method();
         let include_token = params.include_token.unwrap_or(false);
         let do_refresh = params.refresh_token.unwrap_or(false);

@@ -375,6 +412,11 @@ impl CodexMessageProcessor {
             tracing::warn!("failed to refresh token while getting auth status: {err}");
         }

+        // Determine whether auth is required based on the active model provider.
+        // If a custom provider is configured with `requires_openai_auth == false`,
+        // then no auth step is required; otherwise, default to requiring auth.
+        let requires_openai_auth = Some(self.config.model_provider.requires_openai_auth);
+
         let response = match self.auth_manager.auth() {
             Some(auth) => {
                 let (reported_auth_method, token_opt) = match auth.get_token().await {
@@ -390,14 +432,14 @@ impl CodexMessageProcessor {
                 };
                 codex_protocol::mcp_protocol::GetAuthStatusResponse {
                     auth_method: reported_auth_method,
-                    preferred_auth_method,
                     auth_token: token_opt,
+                    requires_openai_auth,
                 }
             }
             None => codex_protocol::mcp_protocol::GetAuthStatusResponse {
                 auth_method: None,
-                preferred_auth_method,
                 auth_token: None,
+                requires_openai_auth,
             },
         };
@@ -222,7 +222,7 @@ async fn run_codex_tool_session_inner(
             }
             EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => {
                 let text = match last_agent_message {
-                    Some(msg) => msg.clone(),
+                    Some(msg) => msg,
                     None => "".to_string(),
                 };
                 let result = CallToolResult {
@@ -56,8 +56,7 @@ impl MessageProcessor {
         config: Arc<Config>,
     ) -> Self {
         let outgoing = Arc::new(outgoing);
-        let auth_manager =
-            AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
+        let auth_manager = AuthManager::shared(config.codex_home.clone());
         let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
         let codex_message_processor = CodexMessageProcessor::new(
             auth_manager,
@@ -532,7 +531,6 @@ impl MessageProcessor {

         // Spawn the long-running reply handler.
         tokio::spawn({
             let codex = codex.clone();
             let outgoing = outgoing.clone();
-            let prompt = prompt.clone();
             let running_requests_id_to_codex_uuid = running_requests_id_to_codex_uuid.clone();
@@ -299,7 +299,7 @@ mod tests {
         let Ok(expected_params) = serde_json::to_value(&event) else {
             panic!("Event must serialize");
         };
-        assert_eq!(params, Some(expected_params.clone()));
+        assert_eq!(params, Some(expected_params));
     }

     #[tokio::test]
@@ -18,6 +18,7 @@ use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
 use codex_protocol::mcp_protocol::GetAuthStatusParams;
 use codex_protocol::mcp_protocol::InterruptConversationParams;
 use codex_protocol::mcp_protocol::ListConversationsParams;
+use codex_protocol::mcp_protocol::LoginApiKeyParams;
 use codex_protocol::mcp_protocol::NewConversationParams;
 use codex_protocol::mcp_protocol::RemoveConversationListenerParams;
 use codex_protocol::mcp_protocol::ResumeConversationParams;
@@ -318,6 +319,15 @@ impl McpProcess {
         self.send_request("resumeConversation", params).await
     }

+    /// Send a `loginApiKey` JSON-RPC request.
+    pub async fn send_login_api_key_request(
+        &mut self,
+        params: LoginApiKeyParams,
+    ) -> anyhow::Result<i64> {
+        let params = Some(serde_json::to_value(params)?);
+        self.send_request("loginApiKey", params).await
+    }
+
     /// Send a `loginChatGpt` JSON-RPC request.
     pub async fn send_login_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
         self.send_request("loginChatGpt", None).await
@@ -1,9 +1,10 @@
 use std::path::Path;

-use codex_core::auth::login_with_api_key;
 use codex_protocol::mcp_protocol::AuthMode;
 use codex_protocol::mcp_protocol::GetAuthStatusParams;
 use codex_protocol::mcp_protocol::GetAuthStatusResponse;
+use codex_protocol::mcp_protocol::LoginApiKeyParams;
+use codex_protocol::mcp_protocol::LoginApiKeyResponse;
 use mcp_test_support::McpProcess;
 use mcp_test_support::to_response;
 use mcp_types::JSONRPCResponse;
@@ -36,10 +37,29 @@ stream_max_retries = 0
     )
 }

+async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) {
+    let request_id = mcp
+        .send_login_api_key_request(LoginApiKeyParams {
+            api_key: api_key.to_string(),
+        })
+        .await
+        .unwrap_or_else(|e| panic!("send loginApiKey: {e}"));
+
+    let resp: JSONRPCResponse = timeout(
+        DEFAULT_READ_TIMEOUT,
+        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
+    )
+    .await
+    .unwrap_or_else(|e| panic!("loginApiKey timeout: {e}"))
+    .unwrap_or_else(|e| panic!("loginApiKey response: {e}"));
+    let _: LoginApiKeyResponse =
+        to_response(resp).unwrap_or_else(|e| panic!("deserialize login response: {e}"));
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_auth_status_no_auth() {
     let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
-    create_config_toml(codex_home.path()).expect("write config.toml");
+    create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));

     let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
         .await
@@ -72,8 +92,7 @@ async fn get_auth_status_no_auth() {
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_auth_status_with_api_key() {
     let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
-    create_config_toml(codex_home.path()).expect("write config.toml");
-    login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
+    create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));

     let mut mcp = McpProcess::new(codex_home.path())
         .await
@@ -83,6 +102,8 @@ async fn get_auth_status_with_api_key() {
     .expect("init timeout")
     .expect("init failed");

+    login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
+
     let request_id = mcp
         .send_get_auth_status_request(GetAuthStatusParams {
             include_token: Some(true),
@@ -101,14 +122,12 @@ async fn get_auth_status_with_api_key() {
     let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
     assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
     assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
-    assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
 }

 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_auth_status_with_api_key_no_include_token() {
     let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
-    create_config_toml(codex_home.path()).expect("write config.toml");
-    login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
+    create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));

     let mut mcp = McpProcess::new(codex_home.path())
         .await
@@ -118,6 +137,8 @@ async fn get_auth_status_with_api_key_no_include_token() {
     .expect("init timeout")
     .expect("init failed");

+    login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
+
     // Build params via struct so None field is omitted in wire JSON.
     let params = GetAuthStatusParams {
         include_token: None,
@@ -138,5 +159,4 @@ async fn get_auth_status_with_api_key_no_include_token() {
     let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
     assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
     assert!(status.auth_token.is_none(), "token must be omitted");
-    assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
 }
@@ -1,7 +1,7 @@
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use codex_core::auth::login_with_api_key;
|
||||
use codex_login::login_with_api_key;
|
||||
use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
|
||||
use codex_protocol::mcp_protocol::CancelLoginChatGptResponse;
|
||||
use codex_protocol::mcp_protocol::GetAuthStatusParams;
|
||||
@@ -95,7 +95,7 @@ async fn logout_chatgpt_removes_auth() {
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn login_and_cancel_chatgpt() {
|
||||
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
|
||||
create_config_toml(codex_home.path()).expect("write config.toml");
|
||||
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
|
||||
|
||||
let mut mcp = McpProcess::new(codex_home.path())
|
||||
.await
|
||||
|
||||
21
codex-rs/mcp-types/check_lib_rs.py
Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env python3

import subprocess
import sys
from pathlib import Path


def main() -> int:
    crate_dir = Path(__file__).resolve().parent
    generator = crate_dir / "generate_mcp_types.py"

    result = subprocess.run(
        [sys.executable, str(generator), "--check"],
        cwd=crate_dir,
        check=False,
    )
    return result.returncode


if __name__ == "__main__":
    raise SystemExit(main())
@@ -5,15 +5,19 @@ import argparse
import json
import subprocess
import sys
import tempfile

from dataclasses import (
dataclass,
)
from difflib import unified_diff
from pathlib import Path
from shutil import copy2

# Helper first so it is defined when other functions call it.
from typing import Any, Literal


SCHEMA_VERSION = "2025-06-18"
JSONRPC_VERSION = "2.0"

@@ -43,16 +47,31 @@ def main() -> int:
default_schema_file = (
Path(__file__).resolve().parent / "schema" / SCHEMA_VERSION / "schema.json"
)
default_lib_rs = Path(__file__).resolve().parent / "src/lib.rs"
parser.add_argument(
"schema_file",
nargs="?",
default=default_schema_file,
help="schema.json file to process",
)
parser.add_argument(
"--check",
action="store_true",
help="Regenerate lib.rs in a sandbox and ensure the checked-in file matches",
)
args = parser.parse_args()
schema_file = args.schema_file
schema_file = Path(args.schema_file)
crate_dir = Path(__file__).resolve().parent

lib_rs = Path(__file__).resolve().parent / "src/lib.rs"
if args.check:
return run_check(schema_file, crate_dir, default_lib_rs)

generate_lib_rs(schema_file, default_lib_rs, fmt=True)
return 0


def generate_lib_rs(schema_file: Path, lib_rs: Path, fmt: bool) -> None:
lib_rs.parent.mkdir(parents=True, exist_ok=True)

global DEFINITIONS # Allow helper functions to access the schema.

@@ -117,9 +136,7 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}

for req_name in CLIENT_REQUEST_TYPE_NAMES:
defn = definitions[req_name]
method_const = (
defn.get("properties", {}).get("method", {}).get("const", req_name)
)
method_const = defn.get("properties", {}).get("method", {}).get("const", req_name)
payload_type = f"<{req_name} as ModelContextProtocolRequest>::Params"
try_from_impl_lines.append(f' "{method_const}" => {{\n')
try_from_impl_lines.append(
@@ -128,9 +145,7 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
try_from_impl_lines.append(
f" let params: {payload_type} = serde_json::from_value(params_json)?;\n"
)
try_from_impl_lines.append(
f" Ok(ClientRequest::{req_name}(params))\n"
)
try_from_impl_lines.append(f" Ok(ClientRequest::{req_name}(params))\n")
try_from_impl_lines.append(" },\n")

try_from_impl_lines.append(
@@ -144,9 +159,7 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}

# Generate TryFrom for ServerNotification
notif_impl_lines: list[str] = []
notif_impl_lines.append(
"impl TryFrom<JSONRPCNotification> for ServerNotification {\n"
)
notif_impl_lines.append("impl TryFrom<JSONRPCNotification> for ServerNotification {\n")
notif_impl_lines.append(" type Error = serde_json::Error;\n")
notif_impl_lines.append(
" fn try_from(n: JSONRPCNotification) -> std::result::Result<Self, Self::Error> {\n"
@@ -155,9 +168,7 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}

for notif_name in SERVER_NOTIFICATION_TYPE_NAMES:
n_def = definitions[notif_name]
method_const = (
n_def.get("properties", {}).get("method", {}).get("const", notif_name)
)
method_const = n_def.get("properties", {}).get("method", {}).get("const", notif_name)
payload_type = f"<{notif_name} as ModelContextProtocolNotification>::Params"
notif_impl_lines.append(f' "{method_const}" => {{\n')
# params may be optional
@@ -167,9 +178,7 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
notif_impl_lines.append(
f" let params: {payload_type} = serde_json::from_value(params_json)?;\n"
)
notif_impl_lines.append(
f" Ok(ServerNotification::{notif_name}(params))\n"
)
notif_impl_lines.append(f" Ok(ServerNotification::{notif_name}(params))\n")
notif_impl_lines.append(" },\n")

notif_impl_lines.append(
@@ -185,13 +194,70 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
for chunk in out:
f.write(chunk)

subprocess.check_call(
["cargo", "fmt", "--", "--config", "imports_granularity=Item"],
cwd=lib_rs.parent.parent,
stderr=subprocess.DEVNULL,
)
if fmt:
subprocess.check_call(
["cargo", "fmt", "--", "--config", "imports_granularity=Item"],
cwd=lib_rs.parent.parent,
stderr=subprocess.DEVNULL,
)

return 0

def run_check(schema_file: Path, crate_dir: Path, checked_in_lib: Path) -> int:
config_path = crate_dir.parent / "rustfmt.toml"
eprint(f"Running --check with schema {schema_file}")

with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
eprint(f"Created temporary workspace at {tmp_path}")
manifest_path = tmp_path / "Cargo.toml"
eprint(f"Copying Cargo.toml into {manifest_path}")
copy2(crate_dir / "Cargo.toml", manifest_path)
manifest_text = manifest_path.read_text(encoding="utf-8")
manifest_text = manifest_text.replace(
"version = { workspace = true }",
'version = "0.0.0"',
)
manifest_text = manifest_text.replace("\n[lints]\nworkspace = true\n", "\n")
manifest_path.write_text(manifest_text, encoding="utf-8")
src_dir = tmp_path / "src"
src_dir.mkdir(parents=True, exist_ok=True)
eprint(f"Generating lib.rs into {src_dir}")
generated_lib = src_dir / "lib.rs"

generate_lib_rs(schema_file, generated_lib, fmt=False)

eprint("Formatting generated lib.rs with rustfmt")
subprocess.check_call(
[
"rustfmt",
"--config-path",
str(config_path),
str(generated_lib),
],
cwd=tmp_path,
stderr=subprocess.DEVNULL,
)

eprint("Comparing generated lib.rs with checked-in version")
checked_in_contents = checked_in_lib.read_text(encoding="utf-8")
generated_contents = generated_lib.read_text(encoding="utf-8")

if checked_in_contents == generated_contents:
eprint("lib.rs matches checked-in version")
return 0

diff = unified_diff(
checked_in_contents.splitlines(keepends=True),
generated_contents.splitlines(keepends=True),
fromfile=str(checked_in_lib),
tofile=str(generated_lib),
)
diff_text = "".join(diff)
eprint("Generated lib.rs does not match the checked-in version. Diff:")
if diff_text:
eprint(diff_text, end="")
eprint("Re-run generate_mcp_types.py without --check to update src/lib.rs.")
return 1


def add_definition(name: str, definition: dict[str, Any], out: list[str]) -> None:
@@ -421,15 +487,11 @@ def define_untagged_enum(name: str, type_list: list[str], out: list[str]) -> Non
case "integer":
out.append(" Integer(i64),\n")
case _:
raise ValueError(
f"Unknown type in untagged enum: {simple_type} in {name}"
)
raise ValueError(f"Unknown type in untagged enum: {simple_type} in {name}")
out.append("}\n\n")


def define_any_of(
name: str, list_of_refs: list[Any], description: str | None = None
) -> list[str]:
def define_any_of(name: str, list_of_refs: list[Any], description: str | None = None) -> list[str]:
"""Generate a Rust enum for a JSON-Schema `anyOf` union.

For most types we simply map each `$ref` inside the `anyOf` list to a
@@ -494,9 +556,7 @@ def define_any_of(
if name == "ClientRequest":
payload_type = f"<{ref_name} as ModelContextProtocolRequest>::Params"
else:
payload_type = (
f"<{ref_name} as ModelContextProtocolNotification>::Params"
)
payload_type = f"<{ref_name} as ModelContextProtocolNotification>::Params"

# Determine the wire value for `method` so we can annotate the
# variant appropriately. If for some reason the schema does not
@@ -504,9 +564,7 @@ def define_any_of(
# least compile (although deserialization will likely fail).
request_def = DEFINITIONS.get(ref_name, {})
method_const = (
request_def.get("properties", {})
.get("method", {})
.get("const", ref_name)
request_def.get("properties", {}).get("method", {}).get("const", ref_name)
)

out.append(f' #[serde(rename = "{method_const}")]\n')
@@ -556,7 +614,7 @@ def map_type(
if type_prop == "string":
if const_prop := typedef.get("const", None):
assert isinstance(const_prop, str)
return f'&\'static str = "{const_prop }"'
return f'&\'static str = "{const_prop}"'
else:
return "String"
elif type_prop == "integer":
@@ -632,7 +690,7 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:
serde_annotations.append('skip_serializing_if = "Option::is_none"')

if serde_annotations:
serde_str = f'#[serde({", ".join(serde_annotations)})]'
serde_str = f"#[serde({', '.join(serde_annotations)})]"
else:
serde_str = None
return RustProp(prop_name, serde_str)
@@ -640,9 +698,7 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:

def to_snake_case(name: str) -> str:
"""Convert a camelCase or PascalCase name to snake_case."""
snake_case = name[0].lower() + "".join(
"_" + c.lower() if c.isupper() else c for c in name[1:]
)
snake_case = name[0].lower() + "".join("_" + c.lower() if c.isupper() else c for c in name[1:])
if snake_case != name:
return snake_case
else:
@@ -678,5 +734,9 @@ def emit_doc_comment(text: str | None, out: list[str]) -> None:
out.append(f"/// {line.rstrip()}\n")


def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs)


if __name__ == "__main__":
sys.exit(main())
@@ -31,6 +31,8 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
codex_protocol::mcp_protocol::SendUserTurnResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::InterruptConversationResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GitDiffToRemoteResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginApiKeyParams::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginApiKeyResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginChatGptResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::CancelLoginChatGptResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LogoutChatGptResponse::export_all_to(out_dir)?;
@@ -126,6 +126,11 @@ pub enum ClientRequest {
request_id: RequestId,
params: GitDiffToRemoteParams,
},
LoginApiKey {
#[serde(rename = "id")]
request_id: RequestId,
params: LoginApiKeyParams,
},
LoginChatGpt {
#[serde(rename = "id")]
request_id: RequestId,
@@ -288,6 +293,16 @@ pub struct ArchiveConversationResponse {}
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
@@ -367,9 +382,14 @@ pub struct ExecArbitraryCommandResponse {
pub struct GetAuthStatusResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_method: Option<AuthMode>,
pub preferred_auth_method: AuthMode,
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_token: Option<String>,

// Indicates that auth method must be valid to use the server.
// This can be false if using a custom provider that is configured
// with requires_openai_auth == false.
#[serde(skip_serializing_if = "Option::is_none")]
pub requires_openai_auth: Option<bool>,
}

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
@@ -219,7 +219,7 @@ impl From<Vec<InputItem>> for ResponseInputItem {
let mime = mime_guess::from_path(&path)
.first()
.map(|m| m.essence_str().to_owned())
.unwrap_or_else(|| "application/octet-stream".to_string());
.unwrap_or_else(|| "image".to_string());
let encoded = base64::engine::general_purpose::STANDARD.encode(bytes);
Some(ContentItem::InputImage {
image_url: format!("data:{mime};base64,{encoded}"),

@@ -897,9 +897,26 @@ pub struct SessionMetaLine {
pub enum RolloutItem {
SessionMeta(SessionMetaLine),
ResponseItem(ResponseItem),
Compacted(CompactedItem),
TurnContext(TurnContextItem),
EventMsg(EventMsg),
}

#[derive(Serialize, Deserialize, Clone, Debug, TS)]
pub struct CompactedItem {
pub message: String,
}

#[derive(Serialize, Deserialize, Clone, Debug, TS)]
pub struct TurnContextItem {
pub cwd: PathBuf,
pub approval_policy: AskForApproval,
pub sandbox_policy: SandboxPolicy,
pub model: String,
pub effort: ReasoningEffortConfig,
pub summary: ReasoningSummaryConfig,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct RolloutLine {
pub timestamp: String,
@@ -11,6 +11,8 @@ use codex_ansi_escape::ansi_escape_line;
use codex_core::AuthManager;
use codex_core::ConversationManager;
use codex_core::config::Config;
use codex_core::config::persist_model_selection;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::TokenUsage;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
@@ -37,6 +39,9 @@ pub(crate) struct App {

/// Config is stored here so we can recreate ChatWidgets as needed.
pub(crate) config: Config,
pub(crate) active_profile: Option<String>,
model_saved_to_profile: bool,
model_saved_to_global: bool,

pub(crate) file_search: FileSearchManager,

@@ -61,6 +66,7 @@ impl App {
tui: &mut tui::Tui,
auth_manager: Arc<AuthManager>,
config: Config,
active_profile: Option<String>,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
resume_selection: ResumeSelection,
@@ -119,6 +125,9 @@ impl App {
app_event_tx,
chat_widget,
config,
active_profile,
model_saved_to_profile: false,
model_saved_to_global: false,
file_search,
enhanced_keys_supported,
transcript_lines: Vec::new(),
@@ -291,7 +300,14 @@ impl App {
self.chat_widget.set_reasoning_effort(effort);
}
AppEvent::UpdateModel(model) => {
self.chat_widget.set_model(model);
self.chat_widget.set_model(model.clone());
self.config.model = model.clone();
if let Some(family) = find_family_for_model(&model) {
self.config.model_family = family;
}
self.model_saved_to_profile = false;
self.model_saved_to_global = false;
self.show_model_save_hint();
}
AppEvent::UpdateAskForApprovalPolicy(policy) => {
self.chat_widget.set_approval_policy(policy);
@@ -304,7 +320,93 @@ impl App {
}

pub(crate) fn token_usage(&self) -> codex_core::protocol::TokenUsage {
self.chat_widget.token_usage().clone()
self.chat_widget.token_usage()
}

fn show_model_save_hint(&mut self) {
let model = self.config.model.clone();
if self.active_profile.is_some() {
self.chat_widget.add_info_message(format!(
"Model switched to {model}. Press Ctrl+S to save it for this profile, then press Ctrl+S again to set it as your global default."
));
} else {
self.chat_widget.add_info_message(format!(
"Model switched to {model}. Press Ctrl+S to save it as your global default."
));
}
}

async fn persist_model_shortcut(&mut self) {
enum SaveScope<'a> {
Profile(&'a str),
Global,
AlreadySaved,
}

let scope = if let Some(profile) = self
.active_profile
.as_deref()
.filter(|_| !self.model_saved_to_profile)
{
SaveScope::Profile(profile)
} else if !self.model_saved_to_global {
SaveScope::Global
} else {
SaveScope::AlreadySaved
};

let model = self.config.model.clone();
let effort = self.config.model_reasoning_effort;
let codex_home = self.config.codex_home.clone();

match scope {
SaveScope::Profile(profile) => {
match persist_model_selection(&codex_home, Some(profile), &model, Some(effort))
.await
{
Ok(()) => {
self.model_saved_to_profile = true;
self.chat_widget.add_info_message(format!(
"Saved model {model} ({effort}) for profile `{profile}`. Press Ctrl+S again to make this your global default."
));
}
Err(err) => {
tracing::error!(
error = %err,
"failed to persist model selection via shortcut"
);
self.chat_widget.add_error_message(format!(
"Failed to save model preference for profile `{profile}`: {err}"
));
}
}
}
SaveScope::Global => {
match persist_model_selection(&codex_home, None, &model, Some(effort)).await {
Ok(()) => {
self.model_saved_to_global = true;
self.chat_widget.add_info_message(format!(
"Saved model {model} ({effort}) as your global default."
));
}
Err(err) => {
tracing::error!(
error = %err,
"failed to persist global model selection via shortcut"
);
self.chat_widget.add_error_message(format!(
"Failed to save global model preference: {err}"
));
}
}
}
SaveScope::AlreadySaved => {
self.chat_widget.add_info_message(
"Model preference already saved globally; no further action needed."
.to_string(),
);
}
}
}

async fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) {
@@ -320,6 +422,14 @@ impl App {
self.overlay = Some(Overlay::new_transcript(self.transcript_lines.clone()));
tui.frame_requester().schedule_frame();
}
KeyEvent {
code: KeyCode::Char('s'),
modifiers: crossterm::event::KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
..
} => {
self.persist_model_shortcut().await;
}
// Esc primes/advances backtracking only in normal (not working) mode
// with an empty composer. In any other state, forward Esc so the
// active UI (e.g. status indicator, modals, popups) handles it.

@@ -335,7 +335,7 @@ impl App {
self.trim_transcript_for_backtrack(drop_count);
self.render_transcript_once(tui);
if !prefill.is_empty() {
self.chat_widget.insert_str(prefill);
self.chat_widget.set_composer_text(prefill.to_string());
}
tui.frame_requester().schedule_frame();
}
@@ -243,6 +243,10 @@ impl ChatComposer {

/// Replace the entire composer content with `text` and reset cursor.
pub(crate) fn set_text_content(&mut self, text: String) {
// Clear any existing content, placeholders, and attachments first.
self.textarea.set_text("");
self.pending_pastes.clear();
self.attached_images.clear();
self.textarea.set_text(&text);
self.textarea.set_cursor(0);
self.sync_command_popup();
@@ -483,7 +487,7 @@ impl ChatComposer {
} => {
// Hide popup without modifying text, remember token to avoid immediate reopen.
if let Some(tok) = Self::current_at_token(&self.textarea) {
self.dismissed_file_popup_token = Some(tok.to_string());
self.dismissed_file_popup_token = Some(tok);
}
self.active_popup = ActivePopup::None;
(InputResult::None, true)
@@ -542,7 +546,7 @@ impl ChatComposer {
Some(ext) if ext == "jpg" || ext == "jpeg" => "JPEG",
_ => "IMG",
};
self.attach_image(path_buf.clone(), w, h, format_label);
self.attach_image(path_buf, w, h, format_label);
// Add a trailing space to keep typing fluid.
self.textarea.insert_str(" ");
} else {
@@ -2119,7 +2123,7 @@ mod tests {

// Re-add and test backspace in middle: should break the placeholder string
// and drop the image mapping (same as text placeholder behavior).
composer.attach_image(path.clone(), 20, 10, "PNG");
composer.attach_image(path, 20, 10, "PNG");
let placeholder2 = composer.attached_images[0].placeholder.clone();
// Move cursor to roughly middle of placeholder
if let Some(start_pos) = composer.textarea.text().find(&placeholder2) {
@@ -2178,7 +2182,7 @@ mod tests {
let path1 = PathBuf::from("/tmp/image_dup1.png");
let path2 = PathBuf::from("/tmp/image_dup2.png");

composer.attach_image(path1.clone(), 10, 5, "PNG");
composer.attach_image(path1, 10, 5, "PNG");
// separate placeholders with a space for clarity
composer.handle_paste(" ".into());
composer.attach_image(path2.clone(), 10, 5, "PNG");
@@ -2227,7 +2231,7 @@ mod tests {
assert!(composer.textarea.text().starts_with("[image 3x2 PNG] "));

let imgs = composer.take_recent_submission_images();
assert_eq!(imgs, vec![tmp_path.clone()]);
assert_eq!(imgs, vec![tmp_path]);
}

#[test]
@@ -564,7 +564,7 @@ mod tests {
let (tx_raw, rx) = unbounded_channel::<AppEvent>();
let tx = AppEventSender::new(tx_raw);
let mut pane = BottomPane::new(BottomPaneParams {
app_event_tx: tx.clone(),
app_event_tx: tx,
frame_requester: FrameRequester::test_dummy(),
has_input_focus: true,
enhanced_keys_supported: false,

@@ -649,9 +649,7 @@ impl TextArea {
}

fn add_element(&mut self, range: Range<usize>) {
let elem = TextElement {
range: range.clone(),
};
let elem = TextElement { range };
self.elements.push(elem);
self.elements.sort_by_key(|e| e.range.start);
}
@@ -574,14 +574,14 @@ impl ChatWidget {
self.active_exec_cell = Some(history_cell::new_active_exec_command(
ev.call_id.clone(),
ev.command.clone(),
ev.parsed_cmd.clone(),
ev.parsed_cmd,
));
}
} else {
self.active_exec_cell = Some(history_cell::new_active_exec_command(
ev.call_id.clone(),
ev.command.clone(),
ev.parsed_cmd.clone(),
ev.parsed_cmd,
));
}

@@ -804,7 +804,7 @@ impl ChatWidget {
"attach_image path={path:?} width={width} height={height} format={format_label}",
);
self.bottom_pane
.attach_image(path.clone(), width, height, format_label);
.attach_image(path, width, height, format_label);
self.request_redraw();
}

@@ -986,7 +986,7 @@ impl ChatWidget {

// Only show the text portion in conversation history.
if !text.is_empty() {
self.add_to_history(history_cell::new_user_prompt(text.clone()));
self.add_to_history(history_cell::new_user_prompt(text));
}
}

@@ -1055,10 +1055,10 @@ impl ChatWidget {
EventMsg::PlanUpdate(update) => self.on_plan_update(update),
EventMsg::ExecApprovalRequest(ev) => {
// For replayed events, synthesize an empty id (these should not occur).
self.on_exec_approval_request(id.clone().unwrap_or_default(), ev)
self.on_exec_approval_request(id.unwrap_or_default(), ev)
}
EventMsg::ApplyPatchApprovalRequest(ev) => {
self.on_apply_patch_approval_request(id.clone().unwrap_or_default(), ev)
self.on_apply_patch_approval_request(id.unwrap_or_default(), ev)
}
EventMsg::ExecCommandBegin(ev) => self.on_exec_command_begin(ev),
EventMsg::ExecCommandOutputDelta(delta) => self.on_exec_command_output_delta(delta),
@@ -1207,7 +1207,7 @@ impl ChatWidget {
self.bottom_pane.show_selection_view(
"Select model and reasoning level".to_string(),
Some("Switch between OpenAI models for this and future Codex CLI session".to_string()),
Some("Press Enter to confirm or Esc to go back".to_string()),
Some("Press Enter to confirm, Esc to go back, Ctrl+S to save".to_string()),
items,
);
}
@@ -1273,6 +1273,16 @@ impl ChatWidget {
self.config.model = model;
}

pub(crate) fn add_info_message(&mut self, message: String) {
self.add_to_history(history_cell::new_info_event(message));
self.request_redraw();
}

pub(crate) fn add_error_message(&mut self, message: String) {
self.add_to_history(history_cell::new_error_event(message));
self.request_redraw();
}

pub(crate) fn add_mcp_output(&mut self) {
if self.config.mcp_servers.is_empty() {
self.add_to_history(history_cell::empty_mcp_output());
@@ -1316,6 +1326,11 @@ impl ChatWidget {
self.bottom_pane.insert_str(text);
}

/// Replace the composer content with the provided text and reset cursor.
pub(crate) fn set_composer_text(&mut self, text: String) {
self.bottom_pane.set_composer_text(text);
}

pub(crate) fn show_esc_backtrack_hint(&mut self) {
self.bottom_pane.show_esc_backtrack_hint();
}
@@ -20,7 +20,7 @@ pub(crate) fn spawn_agent(
) -> UnboundedSender<Op> {
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();

let app_event_tx_clone = app_event_tx.clone();
let app_event_tx_clone = app_event_tx;
tokio::spawn(async move {
let NewConversation {
conversation_id: _,
@@ -71,7 +71,7 @@ pub(crate) fn spawn_agent_from_existing(
) -> UnboundedSender<Op> {
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();

let app_event_tx_clone = app_event_tx.clone();
let app_event_tx_clone = app_event_tx;
tokio::spawn(async move {
// Forward the captured `SessionConfigured` event so it can be rendered in the UI.
let ev = codex_core::protocol::Event {

@@ -352,7 +352,7 @@ fn exec_approval_decision_truncates_multiline_and_long_commands() {
let long = format!("echo {}", "a".repeat(200));
let ev_long = ExecApprovalRequestEvent {
call_id: "call-long".into(),
command: vec!["bash".into(), "-lc".into(), long.clone()],
command: vec!["bash".into(), "-lc".into(), long],
cwd: std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
reason: None,
};
@@ -737,10 +737,10 @@ mod tests {

let mut changes: HashMap<PathBuf, FileChange> = HashMap::new();
changes.insert(
abs_old.clone(),
abs_old,
FileChange::Update {
unified_diff: patch,
move_path: Some(abs_new.clone()),
move_path: Some(abs_new),
},
);
@@ -697,7 +697,7 @@ fn spinner(start_time: Option<Instant>) -> Span<'static> {

pub(crate) fn new_active_mcp_tool_call(invocation: McpInvocation) -> PlainHistoryCell {
let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
let lines: Vec<Line> = vec![title_line, format_mcp_invocation(invocation.clone())];
let lines: Vec<Line> = vec![title_line, format_mcp_invocation(invocation)];

PlainHistoryCell { lines }
}
@@ -1052,6 +1052,12 @@ pub(crate) fn new_mcp_tools_output(
PlainHistoryCell { lines }
}

pub(crate) fn new_info_event(message: String) -> PlainHistoryCell {
let lines: Vec<Line<'static>> =
vec![vec![padded_emoji("💾").green(), " ".into(), message.into()].into()];
PlainHistoryCell { lines }
}

pub(crate) fn new_error_event(message: String) -> PlainHistoryCell {
// Use a hair space (U+200A) to create a subtle, near-invisible separation
// before the text. VS16 is intentionally omitted to keep spacing tighter
@@ -1324,7 +1330,7 @@ fn format_mcp_invocation<'a>(invocation: McpInvocation) -> Line<'a> {
let invocation_spans = vec![
invocation.server.clone().cyan(),
".".into(),
invocation.tool.clone().cyan(),
invocation.tool.cyan(),
"(".into(),
args_str.dim(),
")".into(),
@@ -11,8 +11,10 @@ use codex_core::RolloutRecorder;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::GPT5_HIGH_MODEL;
use codex_core::config::find_codex_home;
use codex_core::config::load_config_as_toml_with_cli_overrides;
use codex_core::config::persist_model_selection;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::SandboxPolicy;
use codex_ollama::DEFAULT_OSS_MODEL;
@@ -47,6 +49,7 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
mod new_model_popup;
pub mod onboarding;
mod pager_overlay;
mod render;
@@ -65,12 +68,14 @@ mod wrapping;
#[cfg(not(debug_assertions))]
mod updates;

pub use cli::Cli;

use crate::new_model_popup::ModelUpgradeDecision;
use crate::new_model_popup::run_model_upgrade_popup;
use crate::onboarding::TrustDirectorySelection;
use crate::onboarding::onboarding_screen::OnboardingScreenArgs;
use crate::onboarding::onboarding_screen::run_onboarding_app;
use crate::tui::Tui;
pub use cli::Cli;
use codex_core::internal_storage::InternalStorage;

// (tests access modules directly within the crate)

@@ -174,14 +179,21 @@ pub async fn run_main(
}
};

let cli_profile_override = cli.config_profile.clone();
let active_profile = cli_profile_override
.clone()
.or_else(|| config_toml.profile.clone());

let should_show_trust_screen = determine_repo_trust_state(
&mut config,
&config_toml,
approval_policy,
sandbox_mode,
cli.config_profile.clone(),
cli_profile_override,
)?;

let internal_storage = InternalStorage::load(&config.codex_home);

let log_dir = codex_core::config::log_dir(&config)?;
std::fs::create_dir_all(&log_dir)?;
// Open (or create) your log file, appending to it.
@@ -224,14 +236,22 @@ pub async fn run_main(

let _ = tracing_subscriber::registry().with(file_layer).try_init();

run_ratatui_app(cli, config, should_show_trust_screen)
.await
.map_err(|err| std::io::Error::other(err.to_string()))
run_ratatui_app(
cli,
config,
internal_storage,
active_profile,
should_show_trust_screen,
)
.await
.map_err(|err| std::io::Error::other(err.to_string()))
}

async fn run_ratatui_app(
cli: Cli,
config: Config,
mut internal_storage: InternalStorage,
active_profile: Option<String>,
should_show_trust_screen: bool,
) -> color_eyre::Result<codex_core::protocol::TokenUsage> {
let mut config = config;
@@ -300,15 +320,7 @@ async fn run_ratatui_app(
// Initialize high-fidelity session event logging if enabled.
session_log::maybe_init(&config);

let Cli {
prompt,
images,
resume,
r#continue,
..
} = cli;

let auth_manager = AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
let auth_manager = AuthManager::shared(config.codex_home.clone());
let login_status = get_login_status(&config);
let should_show_onboarding =
should_show_onboarding(login_status, &config, should_show_trust_screen);
@@ -330,7 +342,7 @@ async fn run_ratatui_app(
}
}

let resume_selection = if r#continue {
let resume_selection = if cli.r#continue {
match RolloutRecorder::list_conversations(&config.codex_home, 1, None).await {
Ok(page) => page
.items
@@ -339,7 +351,7 @@ async fn run_ratatui_app(
.unwrap_or(resume_picker::ResumeSelection::StartFresh),
Err(_) => resume_picker::ResumeSelection::StartFresh,
}
} else if resume {
} else if cli.resume {
match resume_picker::run_resume_picker(&mut tui, &config.codex_home).await? {
resume_picker::ResumeSelection::Exit => {
restore();
@@ -352,10 +364,42 @@ async fn run_ratatui_app(
resume_picker::ResumeSelection::StartFresh
};

if should_show_model_rollout_prompt(
&cli,
&config,
active_profile.as_deref(),
internal_storage.gpt_5_high_model_prompt_seen,
) {
internal_storage.gpt_5_high_model_prompt_seen = true;
if let Err(e) = internal_storage.persist().await {
error!("Failed to persist internal storage: {e:?}");
}

let upgrade_decision = run_model_upgrade_popup(&mut tui).await?;
let switch_to_new_model = upgrade_decision == ModelUpgradeDecision::Switch;

if switch_to_new_model {
config.model = GPT5_HIGH_MODEL.to_owned();
if let Err(e) = persist_model_selection(
&config.codex_home,
active_profile.as_deref(),
&config.model,
None,
)
.await
{
error!("Failed to persist model selection: {e:?}");
}
}
}

let Cli { prompt, images, .. } = cli;

let app_result = App::run(
&mut tui,
auth_manager,
config,
active_profile,
prompt,
images,
resume_selection,
@@ -392,7 +436,7 @@ fn get_login_status(config: &Config) -> LoginStatus {
// Reading the OpenAI API key is an async operation because it may need
// to refresh the token. Block on it.
let codex_home = config.codex_home.clone();
match CodexAuth::from_codex_home(&codex_home, config.preferred_auth_method) {
match CodexAuth::from_codex_home(&codex_home) {
Ok(Some(auth)) => LoginStatus::AuthMode(auth.mode),
Ok(None) => LoginStatus::NotAuthenticated,
Err(err) => {
@@ -460,30 +504,58 @@ fn should_show_login_screen(login_status: LoginStatus, config: &Config) -> bool
return false;
}

match login_status {
LoginStatus::NotAuthenticated => true,
LoginStatus::AuthMode(method) => method != config.preferred_auth_method,
}
login_status == LoginStatus::NotAuthenticated
}

fn should_show_model_rollout_prompt(
cli: &Cli,
config: &Config,
active_profile: Option<&str>,
gpt_5_high_model_prompt_seen: bool,
) -> bool {
// TODO(jif) drop.
let debug_high_enabled = std::env::var("DEBUG_HIGH")
.map(|v| v.eq_ignore_ascii_case("1"))
.unwrap_or(false);

active_profile.is_none()
&& debug_high_enabled
&& cli.model.is_none()
&& !gpt_5_high_model_prompt_seen
&& config.model_provider.requires_openai_auth
&& !cli.oss
}

#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
use std::sync::Once;

fn make_config(preferred: AuthMode) -> Config {
let mut cfg = Config::load_from_base_config_with_overrides(
fn enable_debug_high_env() {
static DEBUG_HIGH_ONCE: Once = Once::new();
DEBUG_HIGH_ONCE.call_once(|| {
// SAFETY: Tests run in a controlled environment and require this env variable to
// opt into the GPT-5 High rollout prompt gating. We only set it once.
unsafe {
std::env::set_var("DEBUG_HIGH", "1");
}
});
}

fn make_config() -> Config {
enable_debug_high_env();
Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
std::env::temp_dir(),
)
.expect("load default config");
cfg.preferred_auth_method = preferred;
cfg
.expect("load default config")
}

#[test]
fn shows_login_when_not_authenticated() {
let cfg = make_config(AuthMode::ChatGPT);
let cfg = make_config();
assert!(should_show_login_screen(
LoginStatus::NotAuthenticated,
&cfg
@@ -491,29 +563,35 @@ mod tests {
}

#[test]
fn shows_login_when_api_key_but_prefers_chatgpt() {
let cfg = make_config(AuthMode::ChatGPT);
assert!(should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ApiKey),
&cfg
))
fn shows_model_rollout_prompt_for_default_model() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
assert!(should_show_model_rollout_prompt(&cli, &cfg, None, false));
}

#[test]
fn hides_login_when_api_key_and_prefers_api_key() {
let cfg = make_config(AuthMode::ApiKey);
assert!(!should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ApiKey),
&cfg
))
fn hides_model_rollout_prompt_when_marked_seen() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, true));
}

#[test]
fn hides_login_when_chatgpt_and_prefers_chatgpt() {
let cfg = make_config(AuthMode::ChatGPT);
assert!(!should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ChatGPT),
&cfg
))
fn hides_model_rollout_prompt_when_cli_overrides_model() {
let cli = Cli::parse_from(["codex", "--model", "gpt-4.1"]);
let cfg = make_config();
assert!(!should_show_model_rollout_prompt(&cli, &cfg, None, false));
}

#[test]
fn hides_model_rollout_prompt_when_profile_active() {
let cli = Cli::parse_from(["codex"]);
let cfg = make_config();
assert!(!should_show_model_rollout_prompt(
&cli,
&cfg,
Some("gpt5"),
false,
));
}
}
155
codex-rs/tui/src/new_model_popup.rs
Normal file
@@ -0,0 +1,155 @@
use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;
use codex_core::config::GPT5_HIGH_MODEL;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::prelude::Widget;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Clear;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
use tokio_stream::StreamExt;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum ModelUpgradeDecision {
Switch,
KeepCurrent,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ModelUpgradeOption {
TryNewModel,
KeepCurrent,
}

struct ModelUpgradePopup {
highlighted: ModelUpgradeOption,
decision: Option<ModelUpgradeDecision>,
request_frame: FrameRequester,
}

impl ModelUpgradePopup {
fn new(request_frame: FrameRequester) -> Self {
Self {
highlighted: ModelUpgradeOption::TryNewModel,
decision: None,
request_frame,
}
}

fn handle_key_event(&mut self, key_event: KeyEvent) {
match key_event.code {
KeyCode::Up | KeyCode::Char('k') => self.highlight(ModelUpgradeOption::TryNewModel),
KeyCode::Down | KeyCode::Char('j') => self.highlight(ModelUpgradeOption::KeepCurrent),
KeyCode::Char('1') => self.select(ModelUpgradeOption::TryNewModel),
KeyCode::Char('2') => self.select(ModelUpgradeOption::KeepCurrent),
KeyCode::Enter => self.select(self.highlighted),
KeyCode::Esc => self.select(ModelUpgradeOption::KeepCurrent),
_ => {}
}
}

fn highlight(&mut self, option: ModelUpgradeOption) {
if self.highlighted != option {
self.highlighted = option;
self.request_frame.schedule_frame();
}
}

fn select(&mut self, option: ModelUpgradeOption) {
self.decision = Some(option.into());
self.request_frame.schedule_frame();
}
}

impl From<ModelUpgradeOption> for ModelUpgradeDecision {
fn from(option: ModelUpgradeOption) -> Self {
match option {
ModelUpgradeOption::TryNewModel => ModelUpgradeDecision::Switch,
ModelUpgradeOption::KeepCurrent => ModelUpgradeDecision::KeepCurrent,
}
}
}

impl WidgetRef for &ModelUpgradePopup {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
Clear.render(area, buf);

let mut lines: Vec<Line> = vec![
Line::from(vec![
"> ".into(),
format!("Try {GPT5_HIGH_MODEL} as your default model").bold(),
]),
format!(" {GPT5_HIGH_MODEL} is our latest model tuned for coding workflows.").into(),
" Switch now or keep your current default – you can change models any time.".into(),
"".into(),
];

let create_option =
|index: usize, option: ModelUpgradeOption, text: &str| -> Line<'static> {
if self.highlighted == option {
Line::from(vec![
format!("> {}. ", index + 1).cyan(),
text.to_owned().cyan(),
])
} else {
format!(" {}. {text}", index + 1).into()
}
};

lines.push(create_option(
0,
ModelUpgradeOption::TryNewModel,
&format!("Yes, switch me to {GPT5_HIGH_MODEL}"),
));
lines.push(create_option(
1,
ModelUpgradeOption::KeepCurrent,
"Not right now",
));
lines.push("".into());
lines.push(
" Press Enter to confirm or Esc to keep your current model"
.dim()
.into(),
);

Paragraph::new(lines)
.wrap(Wrap { trim: false })
.render(area, buf);
}
}

pub(crate) async fn run_model_upgrade_popup(tui: &mut Tui) -> Result<ModelUpgradeDecision> {
let mut popup = ModelUpgradePopup::new(tui.frame_requester());

tui.draw(u16::MAX, |frame| {
frame.render_widget_ref(&popup, frame.area());
})?;

let events = tui.event_stream();
tokio::pin!(events);
while popup.decision.is_none() {
if let Some(event) = events.next().await {
match event {
TuiEvent::Key(key_event) => popup.handle_key_event(key_event),
TuiEvent::Draw => {
let _ = tui.draw(u16::MAX, |frame| {
frame.render_widget_ref(&popup, frame.area());
});
}
_ => {}
}
} else {
break;
}
}

Ok(popup.decision.unwrap_or(ModelUpgradeDecision::KeepCurrent))
}
@@ -2,12 +2,17 @@
|
||||
|
||||
use codex_core::AuthManager;
|
||||
use codex_core::auth::CLIENT_ID;
|
||||
use codex_core::auth::login_with_api_key;
|
||||
use codex_core::auth::read_openai_api_key_from_env;
|
||||
use codex_login::ServerOptions;
|
||||
use codex_login::ShutdownHandle;
|
||||
use codex_login::run_login_server;
|
||||
use crossterm::event::KeyCode;
|
||||
use crossterm::event::KeyEvent;
|
||||
use crossterm::event::KeyModifiers;
|
||||
use ratatui::buffer::Buffer;
|
||||
use ratatui::layout::Constraint;
|
||||
use ratatui::layout::Layout;
|
||||
use ratatui::layout::Rect;
|
||||
use ratatui::prelude::Widget;
|
||||
use ratatui::style::Color;
|
||||
@@ -15,6 +20,9 @@ use ratatui::style::Modifier;
|
||||
use ratatui::style::Style;
|
||||
use ratatui::style::Stylize;
|
||||
use ratatui::text::Line;
|
||||
use ratatui::widgets::Block;
|
||||
use ratatui::widgets::BorderType;
|
||||
use ratatui::widgets::Borders;
|
||||
use ratatui::widgets::Paragraph;
|
||||
use ratatui::widgets::WidgetRef;
|
||||
use ratatui::widgets::Wrap;
|
||||
@@ -38,8 +46,14 @@ pub(crate) enum SignInState {
|
||||
ChatGptContinueInBrowser(ContinueInBrowserState),
|
||||
ChatGptSuccessMessage,
|
||||
ChatGptSuccess,
|
||||
EnvVarMissing,
|
||||
EnvVarFound,
|
||||
ApiKeyEntry(ApiKeyInputState),
|
||||
ApiKeyConfigured,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct ApiKeyInputState {
|
||||
value: String,
|
||||
prepopulated_from_env: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -59,6 +73,10 @@ impl Drop for ContinueInBrowserState {
|
||||
|
||||
impl KeyboardHandler for AuthModeWidget {
|
||||
fn handle_key_event(&mut self, key_event: KeyEvent) {
|
||||
if self.handle_api_key_entry_key_event(&key_event) {
|
||||
return;
|
||||
}
|
||||
|
||||
match key_event.code {
|
||||
KeyCode::Up | KeyCode::Char('k') => {
|
||||
self.highlighted_mode = AuthMode::ChatGPT;
|
||||
@@ -69,7 +87,7 @@ impl KeyboardHandler for AuthModeWidget {
|
||||
KeyCode::Char('1') => {
|
||||
self.start_chatgpt_login();
|
||||
}
|
||||
KeyCode::Char('2') => self.verify_api_key(),
|
||||
KeyCode::Char('2') => self.start_api_key_entry(),
|
||||
KeyCode::Enter => {
|
||||
let sign_in_state = { (*self.sign_in_state.read().unwrap()).clone() };
|
||||
match sign_in_state {
|
||||
@@ -78,12 +96,9 @@ impl KeyboardHandler for AuthModeWidget {
|
||||
self.start_chatgpt_login();
|
||||
}
|
||||
AuthMode::ApiKey => {
|
||||
self.verify_api_key();
|
||||
self.start_api_key_entry();
|
||||
}
|
||||
},
|
||||
SignInState::EnvVarMissing => {
|
||||
*self.sign_in_state.write().unwrap() = SignInState::PickMode;
|
||||
}
|
||||
SignInState::ChatGptSuccessMessage => {
|
||||
*self.sign_in_state.write().unwrap() = SignInState::ChatGptSuccess;
|
||||
}
|
||||
@@ -101,6 +116,10 @@ impl KeyboardHandler for AuthModeWidget {
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_paste(&mut self, pasted: String) {
|
||||
let _ = self.handle_api_key_entry_paste(pasted);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -111,7 +130,6 @@ pub(crate) struct AuthModeWidget {
|
||||
pub sign_in_state: Arc<RwLock<SignInState>>,
|
||||
pub codex_home: PathBuf,
|
||||
pub login_status: LoginStatus,
|
||||
pub preferred_auth_method: AuthMode,
|
||||
pub auth_manager: Arc<AuthManager>,
|
||||
}
|
||||
|
||||
@@ -129,24 +147,6 @@ impl AuthModeWidget {
|
||||
"".into(),
|
||||
];
|
||||
|
||||
// If the user is already authenticated but the method differs from their
|
||||
// preferred auth method, show a brief explanation.
|
||||
if let LoginStatus::AuthMode(current) = self.login_status
|
||||
&& current != self.preferred_auth_method
|
||||
{
|
||||
let to_label = |mode: AuthMode| match mode {
|
||||
AuthMode::ApiKey => "API key",
|
||||
AuthMode::ChatGPT => "ChatGPT",
|
||||
};
|
||||
let msg = format!(
|
||||
" You’re currently using {} while your preferred method is {}.",
|
||||
to_label(current),
|
||||
to_label(self.preferred_auth_method)
|
||||
);
|
||||
lines.push(msg.into());
|
||||
lines.push("".into());
|
||||
}
|
||||
|
||||
let create_mode_item = |idx: usize,
|
||||
selected_mode: AuthMode,
|
||||
text: &str,
|
||||
@@ -175,29 +175,17 @@ impl AuthModeWidget {
|
||||
|
||||
vec![line1, line2]
|
||||
};
|
||||
let chatgpt_label = if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ChatGPT))
|
||||
{
|
||||
"Continue using ChatGPT"
|
||||
} else {
|
||||
"Sign in with ChatGPT"
|
||||
};
|
||||
|
||||
lines.extend(create_mode_item(
|
||||
0,
|
||||
AuthMode::ChatGPT,
|
||||
chatgpt_label,
|
||||
"Sign in with ChatGPT",
|
||||
"Usage included with Plus, Pro, and Team plans",
|
||||
));
|
||||
let api_key_label = if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ApiKey))
|
||||
{
|
||||
"Continue using API key"
|
||||
} else {
|
||||
"Provide your own API key"
|
||||
};
|
||||
lines.extend(create_mode_item(
|
||||
1,
|
||||
AuthMode::ApiKey,
|
||||
api_key_label,
|
||||
"Provide your own API key",
|
||||
"Pay for what you use",
|
||||
));
|
||||
lines.push("".into());
|
||||
@@ -282,26 +270,213 @@ impl AuthModeWidget {
            .render(area, buf);
    }

-    fn render_env_var_found(&self, area: Rect, buf: &mut Buffer) {
-        let lines = vec!["✓ Using OPENAI_API_KEY".fg(Color::Green).into()];
+    fn render_api_key_configured(&self, area: Rect, buf: &mut Buffer) {
+        let lines = vec![
+            "✓ API key configured".fg(Color::Green).into(),
+            "".into(),
+            " Codex will use usage-based billing with your API key.".into(),
+        ];

        Paragraph::new(lines)
            .wrap(Wrap { trim: false })
            .render(area, buf);
    }

-    fn render_env_var_missing(&self, area: Rect, buf: &mut Buffer) {
-        let lines = vec![
-            " To use Codex with the OpenAI API, set OPENAI_API_KEY in your environment"
-                .fg(Color::Cyan)
-                .into(),
-            "".into(),
-            " Press Enter to return".dim().into(),
-        ];
+    fn render_api_key_entry(&self, area: Rect, buf: &mut Buffer, state: &ApiKeyInputState) {
+        let [intro_area, input_area, footer_area] = Layout::vertical([
+            Constraint::Min(4),
+            Constraint::Length(3),
+            Constraint::Min(2),
+        ])
+        .areas(area);

-        Paragraph::new(lines)
+        let mut intro_lines: Vec<Line> = vec![
+            Line::from(vec![
+                "> ".into(),
+                "Use your own OpenAI API key for usage-based billing".bold(),
+            ]),
+            "".into(),
+            " Paste or type your API key below. It will be stored locally in auth.json.".into(),
+            "".into(),
+        ];
+        if state.prepopulated_from_env {
+            intro_lines.push(" Detected OPENAI_API_KEY environment variable.".into());
+            intro_lines.push(
+                " Paste a different key if you prefer to use another account."
+                    .dim()
+                    .into(),
+            );
+            intro_lines.push("".into());
+        }
+        Paragraph::new(intro_lines)
            .wrap(Wrap { trim: false })
-            .render(area, buf);
+            .render(intro_area, buf);

+        let content_line: Line = if state.value.is_empty() {
+            vec!["Paste or type your API key".dim()].into()
+        } else {
+            Line::from(state.value.clone())
+        };
+        Paragraph::new(content_line)
+            .wrap(Wrap { trim: false })
+            .block(
+                Block::default()
+                    .title("API key")
+                    .borders(Borders::ALL)
+                    .border_type(BorderType::Rounded)
+                    .border_style(Style::default().fg(Color::Cyan)),
+            )
+            .render(input_area, buf);

+        let mut footer_lines: Vec<Line> = vec![
+            " Press Enter to save".dim().into(),
+            " Press Esc to go back".dim().into(),
+        ];
+        if let Some(error) = &self.error {
+            footer_lines.push("".into());
+            footer_lines.push(error.as_str().red().into());
+        }
+        Paragraph::new(footer_lines)
+            .wrap(Wrap { trim: false })
+            .render(footer_area, buf);
    }

+    fn handle_api_key_entry_key_event(&mut self, key_event: &KeyEvent) -> bool {
+        let mut should_save: Option<String> = None;
+        let mut should_request_frame = false;

+        {
+            let mut guard = self.sign_in_state.write().unwrap();
+            if let SignInState::ApiKeyEntry(state) = &mut *guard {
+                match key_event.code {
+                    KeyCode::Esc => {
+                        *guard = SignInState::PickMode;
+                        self.error = None;
+                        should_request_frame = true;
+                    }
+                    KeyCode::Enter => {
+                        let trimmed = state.value.trim().to_string();
+                        if trimmed.is_empty() {
+                            self.error = Some("API key cannot be empty".to_string());
+                            should_request_frame = true;
+                        } else {
+                            should_save = Some(trimmed);
+                        }
+                    }
+                    KeyCode::Backspace => {
+                        if state.prepopulated_from_env {
+                            state.value.clear();
+                            state.prepopulated_from_env = false;
+                        } else {
+                            state.value.pop();
+                        }
+                        self.error = None;
+                        should_request_frame = true;
+                    }
+                    KeyCode::Char(c)
+                        if !key_event.modifiers.contains(KeyModifiers::CONTROL)
+                            && !key_event.modifiers.contains(KeyModifiers::ALT) =>
+                    {
+                        if state.prepopulated_from_env {
+                            state.value.clear();
+                            state.prepopulated_from_env = false;
+                        }
+                        state.value.push(c);
+                        self.error = None;
+                        should_request_frame = true;
+                    }
+                    _ => {}
+                }
+                // handled; let guard drop before potential save
+            } else {
+                return false;
+            }
+        }

+        if let Some(api_key) = should_save {
+            self.save_api_key(api_key);
+        } else if should_request_frame {
+            self.request_frame.schedule_frame();
+        }
+        true
+    }

+    fn handle_api_key_entry_paste(&mut self, pasted: String) -> bool {
+        let trimmed = pasted.trim();
+        if trimmed.is_empty() {
+            return false;
+        }

+        let mut guard = self.sign_in_state.write().unwrap();
+        if let SignInState::ApiKeyEntry(state) = &mut *guard {
+            if state.prepopulated_from_env {
+                state.value = trimmed.to_string();
+                state.prepopulated_from_env = false;
+            } else {
+                state.value.push_str(trimmed);
+            }
+            self.error = None;
+        } else {
+            return false;
+        }

+        drop(guard);
+        self.request_frame.schedule_frame();
+        true
+    }

+    fn start_api_key_entry(&mut self) {
+        self.error = None;
+        let prefill_from_env = read_openai_api_key_from_env();
+        let mut guard = self.sign_in_state.write().unwrap();
+        match &mut *guard {
+            SignInState::ApiKeyEntry(state) => {
+                if state.value.is_empty() {
+                    if let Some(prefill) = prefill_from_env {
+                        state.value = prefill;
+                        state.prepopulated_from_env = true;
+                    } else {
+                        state.prepopulated_from_env = false;
+                    }
+                }
+            }
+            _ => {
+                *guard = SignInState::ApiKeyEntry(ApiKeyInputState {
+                    value: prefill_from_env.clone().unwrap_or_default(),
+                    prepopulated_from_env: prefill_from_env.is_some(),
+                });
+            }
+        }
+        drop(guard);
+        self.request_frame.schedule_frame();
+    }

+    fn save_api_key(&mut self, api_key: String) {
+        match login_with_api_key(&self.codex_home, &api_key) {
+            Ok(()) => {
+                self.error = None;
+                self.login_status = LoginStatus::AuthMode(AuthMode::ApiKey);
+                self.auth_manager.reload();
+                *self.sign_in_state.write().unwrap() = SignInState::ApiKeyConfigured;
+            }
+            Err(err) => {
+                self.error = Some(format!("Failed to save API key: {err}"));
+                let mut guard = self.sign_in_state.write().unwrap();
+                if let SignInState::ApiKeyEntry(existing) = &mut *guard {
+                    if existing.value.is_empty() {
+                        existing.value.push_str(&api_key);
+                    }
+                    existing.prepopulated_from_env = false;
+                } else {
+                    *guard = SignInState::ApiKeyEntry(ApiKeyInputState {
+                        value: api_key,
+                        prepopulated_from_env: false,
+                    });
+                }
+            }
+        }

+        self.request_frame.schedule_frame();
+    }

    fn start_chatgpt_login(&mut self) {
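
A note on the locking in `handle_api_key_entry_key_event` above: the write guard is confined to an inner block so it is released before `save_api_key` runs, since `save_api_key` takes the same lock again. A minimal standalone sketch of that pattern (std-only; the names are invented for illustration):

```rust
use std::sync::RwLock;

fn main() {
    let state = RwLock::new(String::new());

    // Mutate inside a scoped block so the write guard is dropped here...
    let should_save = {
        let mut guard = state.write().unwrap();
        guard.push_str("sk-example");
        !guard.is_empty()
    };

    // ...because taking the lock again while the write guard was still
    // alive could deadlock (std's RwLock is not reentrant).
    if should_save {
        println!("saving key: {}", state.read().unwrap());
    }
}
```
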
@@ -354,18 +529,6 @@ impl AuthModeWidget {
            }
        }
    }

-    /// TODO: Read/write from the correct hierarchy config overrides + auth json + OPENAI_API_KEY.
-    fn verify_api_key(&mut self) {
-        if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ApiKey)) {
-            // We already have an API key configured (e.g., from auth.json or env),
-            // so mark this step complete immediately.
-            *self.sign_in_state.write().unwrap() = SignInState::EnvVarFound;
-        } else {
-            *self.sign_in_state.write().unwrap() = SignInState::EnvVarMissing;
-        }
-        self.request_frame.schedule_frame();
-    }
}

impl StepStateProvider for AuthModeWidget {
@@ -373,10 +536,10 @@ impl StepStateProvider for AuthModeWidget {
        let sign_in_state = self.sign_in_state.read().unwrap();
        match &*sign_in_state {
            SignInState::PickMode
-            | SignInState::EnvVarMissing
+            | SignInState::ApiKeyEntry(_)
            | SignInState::ChatGptContinueInBrowser(_)
            | SignInState::ChatGptSuccessMessage => StepState::InProgress,
-            SignInState::ChatGptSuccess | SignInState::EnvVarFound => StepState::Complete,
+            SignInState::ChatGptSuccess | SignInState::ApiKeyConfigured => StepState::Complete,
        }
    }
}
@@ -397,11 +560,11 @@ impl WidgetRef for AuthModeWidget {
            SignInState::ChatGptSuccess => {
                self.render_chatgpt_success(area, buf);
            }
-            SignInState::EnvVarMissing => {
-                self.render_env_var_missing(area, buf);
+            SignInState::ApiKeyEntry(state) => {
+                self.render_api_key_entry(area, buf, state);
            }
-            SignInState::EnvVarFound => {
-                self.render_env_var_found(area, buf);
+            SignInState::ApiKeyConfigured => {
+                self.render_api_key_configured(area, buf);
            }
        }
    }

@@ -34,6 +34,7 @@ enum Step {

pub(crate) trait KeyboardHandler {
    fn handle_key_event(&mut self, key_event: KeyEvent);
+    fn handle_paste(&mut self, _pasted: String) {}
}

pub(crate) enum StepState {
@@ -69,9 +70,8 @@ impl OnboardingScreen {
            auth_manager,
            config,
        } = args;
-        let preferred_auth_method = config.preferred_auth_method;
        let cwd = config.cwd.clone();
-        let codex_home = config.codex_home.clone();
+        let codex_home = config.codex_home;
        let mut steps: Vec<Step> = vec![Step::Welcome(WelcomeWidget {
            is_logged_in: !matches!(login_status, LoginStatus::NotAuthenticated),
        })];
@@ -84,7 +84,6 @@ impl OnboardingScreen {
                codex_home: codex_home.clone(),
                login_status,
                auth_manager,
-                preferred_auth_method,
            }))
        }
        let is_git_repo = get_git_repo_root(&cwd).is_some();
@@ -194,6 +193,17 @@ impl KeyboardHandler for OnboardingScreen {
        };
        self.request_frame.schedule_frame();
    }

+    fn handle_paste(&mut self, pasted: String) {
+        if pasted.is_empty() {
+            return;
+        }

+        if let Some(active_step) = self.current_steps_mut().into_iter().last() {
+            active_step.handle_paste(pasted);
+        }
+        self.request_frame.schedule_frame();
+    }
}

impl WidgetRef for &OnboardingScreen {
@@ -263,6 +273,14 @@ impl KeyboardHandler for Step {
            Step::TrustDirectory(widget) => widget.handle_key_event(key_event),
        }
    }

+    fn handle_paste(&mut self, pasted: String) {
+        match self {
+            Step::Welcome(_) => {}
+            Step::Auth(widget) => widget.handle_paste(pasted),
+            Step::TrustDirectory(widget) => widget.handle_paste(pasted),
+        }
+    }
}

impl StepStateProvider for Step {
@@ -312,12 +330,14 @@ pub(crate) async fn run_onboarding_app(
            TuiEvent::Key(key_event) => {
                onboarding_screen.handle_key_event(key_event);
            }
+            TuiEvent::Paste(text) => {
+                onboarding_screen.handle_paste(text);
+            }
            TuiEvent::Draw => {
                let _ = tui.draw(u16::MAX, |frame| {
                    frame.render_widget_ref(&onboarding_screen, frame.area());
                });
            }
            _ => {}
        }
    }
}

@@ -244,7 +244,7 @@ impl UserApprovalWidget {
                    "You ".into(),
                    "approved".bold(),
                    " codex to run ".into(),
-                    snippet.clone().dim(),
+                    snippet.dim(),
                    " this time".bold(),
                ]);
            }
@@ -254,7 +254,7 @@ impl UserApprovalWidget {
                    "You ".into(),
                    "approved".bold(),
                    " codex to run ".into(),
-                    snippet.clone().dim(),
+                    snippet.dim(),
                    " every time this session".bold(),
                ]);
            }
@@ -264,7 +264,7 @@ impl UserApprovalWidget {
                    "You ".into(),
                    "did not approve".bold(),
                    " codex to run ".into(),
-                    snippet.clone().dim(),
+                    snippet.dim(),
                ]);
            }
            ReviewDecision::Abort => {
@@ -273,7 +273,7 @@ impl UserApprovalWidget {
                    "You ".into(),
                    "canceled".bold(),
                    " the request to run ".into(),
-                    snippet.clone().dim(),
+                    snippet.dim(),
                ]);
            }
        }

@@ -8,7 +8,7 @@ Run Codex head-less in pipelines. Example GitHub Action step:
- name: Update changelog via Codex
  run: |
    npm install -g @openai/codex
-    export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
+    codex login --api-key "${{ secrets.OPENAI_KEY }}"
    codex exec --full-auto "update CHANGELOG for next release"
```

@@ -38,5 +38,49 @@ args = ["-y", "mcp-server"]
env = { "API_KEY" = "value" }
```
+
+## Using Codex as an MCP Server
+
+The Codex CLI can also be run as an MCP _server_ via `codex mcp`. For example, you can use `codex mcp` to make Codex available as a tool inside of a multi-agent framework like the OpenAI [Agents SDK](https://platform.openai.com/docs/guides/agents).
+
+### Codex MCP Server Quickstart
+You can launch a Codex MCP server with the [Model Context Protocol Inspector](https://modelcontextprotocol.io/legacy/tools/inspector):
+
+```bash
+npx @modelcontextprotocol/inspector codex mcp
+```
+Send a `tools/list` request and you will see that there are two tools available:
+
+**`codex`** - Run a Codex session. Accepts configuration parameters matching the Codex Config struct. The `codex` tool takes the following properties:
+
+Property              | Type    | Description
+--------------------- | ------- | -----------
+approval-policy       | string  | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `never`.
+base-instructions     | string  | The set of instructions to use instead of the default ones.
+config                | object  | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in CODEX_HOME/config.toml.
+cwd                   | string  | Working directory for the session. If relative, resolved against the server process's current directory.
+include-plan-tool     | boolean | Whether to include the plan tool in the conversation.
+model                 | string  | Optional override for the model name (e.g. "o3", "o4-mini").
+profile               | string  | Configuration profile from config.toml to specify default options.
+**prompt** (required) | string  | The initial user prompt to start the Codex conversation.
+sandbox               | string  | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.

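For illustration, a `tools/call` request for the `codex` tool might look like the following JSON-RPC message (a sketch: an MCP client such as the inspector normally builds this envelope for you, and the argument values here are made up):

```json
{
  "jsonrpc": "2.0",
  "id": 2,
  "method": "tools/call",
  "params": {
    "name": "codex",
    "arguments": {
      "prompt": "List every TODO comment in this repository.",
      "sandbox": "read-only",
      "approval-policy": "never",
      "config": { "model": "o4-mini" }
    }
  }
}
```
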
+**`codex-reply`** - Continue a Codex session by providing the session id and prompt. The `codex-reply` tool takes the following properties:
+
+Property                      | Type   | Description
+----------------------------- | ------ | -----------
+**prompt** (required)         | string | The next user prompt to continue the Codex conversation.
+**conversationId** (required) | string | The id of the conversation to continue.

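A follow-up turn then goes through `codex-reply` with the id returned by the first `codex` call (sketch; the conversation id below is a placeholder):

```json
{
  "jsonrpc": "2.0",
  "id": 3,
  "method": "tools/call",
  "params": {
    "name": "codex-reply",
    "arguments": {
      "conversationId": "conv-1234",
      "prompt": "Now fix the first TODO you found."
    }
  }
}
```
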
+### Trying it Out
 > [!TIP]
-> It is somewhat experimental, but the Codex CLI can also be run as an MCP _server_ via `codex mcp`. If you launch it with an MCP client such as `npx @modelcontextprotocol/inspector codex mcp` and send it a `tools/list` request, you will see that there is only one tool, `codex`, that accepts a grab-bag of inputs, including a catch-all `config` map for anything you might want to override. Feel free to play around with it and provide feedback via GitHub issues.
+> Codex often takes a few minutes to run. To accommodate this, adjust the MCP inspector's Request and Total timeouts to 600000ms (10 minutes) under ⛭ Configuration.

+Use the MCP inspector and `codex mcp` to build a simple tic-tac-toe game with the following settings:
+
+**approval-policy:** never
+
+**prompt:** Implement a simple tic-tac-toe game with HTML, JavaScript, and CSS. Write the game in a single file called index.html.
+
+**sandbox:** workspace-write
+
+Click "Run Tool" and you should see a list of events emitted from the Codex MCP server as it builds the game.

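In the inspector you fill these in as form fields; expressed as raw tool arguments, the same settings would look roughly like this:

```json
{
  "approval-policy": "never",
  "sandbox": "workspace-write",
  "prompt": "Implement a simple tic-tac-toe game with HTML, JavaScript, and CSS. Write the game in a single file called index.html."
}
```
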
@@ -2,10 +2,10 @@

## Usage-based billing alternative: Use an OpenAI API key

-If you prefer to pay-as-you-go, you can still authenticate with your OpenAI API key by setting it as an environment variable:
+If you prefer to pay-as-you-go, you can still authenticate with your OpenAI API key:

```shell
-export OPENAI_API_KEY="your-api-key-here"
+codex login --api-key "your-api-key-here"
```

This key must, at minimum, have write access to the Responses API.
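
If you want to sanity-check a key's Responses API access before handing it to Codex, a minimal request is enough (a sketch; the model name here is an assumption and the call is billed to your account):

```shell
curl https://api.openai.com/v1/responses \
  -H "Authorization: Bearer your-api-key-here" \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-4o-mini", "input": "ping"}'
```
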
@@ -18,36 +18,6 @@ If you've used the Codex CLI before with usage-based billing via an API key and
2. Delete `~/.codex/auth.json` (on Windows: `C:\\Users\\USERNAME\\.codex\\auth.json`)
3. Run `codex login` again

-## Forcing a specific auth method (advanced)
-
-You can explicitly choose which authentication Codex should prefer when both are available.
-
-- To always use your API key (even when ChatGPT auth exists), set:
-
-```toml
-# ~/.codex/config.toml
-preferred_auth_method = "apikey"
-```
-
-Or override ad-hoc via CLI:
-
-```bash
-codex --config preferred_auth_method="apikey"
-```
-
-- To prefer ChatGPT auth (default), set:
-
-```toml
-# ~/.codex/config.toml
-preferred_auth_method = "chatgpt"
-```
-
-Notes:
-
-- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped.
-- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode.
-- To check which auth method is being used during a session, use the `/status` command in the TUI.

## Connecting on a "Headless" Machine

Today, the login process entails running a server on `localhost:1455`. If you are on a "headless" server, such as a Docker container, or are `ssh`'d into a remote machine, loading `localhost:1455` in the browser on your local machine will not automatically connect to the webserver running on the _headless_ machine, so you must use one of the following workarounds:

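One common workaround, assuming you can SSH into the headless machine, is to forward port 1455 to your local machine and then run `codex login` on the remote host:

```shell
ssh -L 1455:localhost:1455 user@remote-host
```
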
@@ -612,5 +612,4 @@ Options that are specific to the TUI.
| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
| `responses_originator_header_internal_override` | string | Override `originator` header value. |
| `projects.<path>.trust_level` | string | Mark project/worktree as trusted (only `"trusted"` is recognized). |
-| `preferred_auth_method` | `chatgpt` \| `apikey` | Select default auth method (default: `chatgpt`). |
| `tools.web_search` | boolean | Enable web search tool (alias: `web_search_request`) (default: false). |

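As an example of the `key = value` shapes these entries expect, enabling the web search tool from the last row would look like this in `~/.codex/config.toml` (a minimal sketch):

```toml
[tools]
web_search = true
```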