Compare commits


1 Commit

Author: Ahmed Ibrahim
SHA1: 8aa5e7770c
Message: fmt + clippy: codex-core deterministic shell tool tests, conflict cleanup
Date: 2025-09-10 23:42:57 -07:00
32 changed files with 719 additions and 771 deletions

View File

@@ -62,8 +62,6 @@ jobs:
components: rustfmt
- name: cargo fmt
run: cargo fmt -- --config imports_granularity=Item --check
- name: Verify codegen for mcp-types
run: ./mcp-types/check_lib_rs.py
cargo_shear:
name: cargo shear

View File

@@ -219,22 +219,3 @@ jobs:
with:
tag: ${{ github.ref_name }}
config: .github/dotslash-config.json
update-branch:
name: Update latest-alpha-cli branch
permissions:
contents: write
needs: release
runs-on: ubuntu-latest
steps:
- name: Update latest-alpha-cli branch
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
gh api \
repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
-X PATCH \
-f sha="${GITHUB_SHA}" \
-f force=true

codex-rs/Cargo.lock generated
View File

@@ -561,6 +561,7 @@ dependencies = [
"clap",
"codex-common",
"codex-core",
"codex-protocol",
"serde",
"serde_json",
"tempfile",

View File

@@ -733,8 +733,6 @@ fn compute_replacements(
}
}
replacements.sort_by(|(lhs_idx, _, _), (rhs_idx, _, _)| lhs_idx.cmp(rhs_idx));
Ok(replacements)
}
@@ -1218,33 +1216,6 @@ PATCH"#,
assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n");
}
#[test]
fn test_pure_addition_chunk_followed_by_removal() {
let dir = tempdir().unwrap();
let path = dir.path().join("panic.txt");
fs::write(&path, "line1\nline2\nline3\n").unwrap();
let patch = wrap_patch(&format!(
r#"*** Update File: {}
@@
+after-context
+second-line
@@
line1
-line2
-line3
+line2-replacement"#,
path.display()
));
let mut stdout = Vec::new();
let mut stderr = Vec::new();
apply_patch(&patch, &mut stdout, &mut stderr).unwrap();
let contents = fs::read_to_string(path).unwrap();
assert_eq!(
contents,
"line1\nline2-replacement\nafter-context\nsecond-line\n"
);
}
/// Ensure that patches authored with ASCII characters can update lines that
/// contain typographic Unicode punctuation (e.g. EN DASH, NON-BREAKING
/// HYPHEN). Historically `git apply` succeeds in such scenarios but our

View File

@@ -11,6 +11,7 @@ anyhow = "1"
clap = { version = "4", features = ["derive"] }
codex-common = { path = "../common", features = ["cli"] }
codex-core = { path = "../core" }
codex-protocol = { path = "../protocol" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }

View File

@@ -1,4 +1,5 @@
use codex_core::CodexAuth;
use codex_protocol::mcp_protocol::AuthMode;
use std::path::Path;
use std::sync::LazyLock;
use std::sync::RwLock;
@@ -19,7 +20,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {
/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth = CodexAuth::from_codex_home(codex_home)?;
let auth = CodexAuth::from_codex_home(codex_home, AuthMode::ChatGPT)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);

View File

@@ -1,6 +1,7 @@
use codex_common::CliConfigOverrides;
use codex_core::CodexAuth;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::OPENAI_API_KEY_ENV_VAR;
use codex_core::auth::login_with_api_key;
use codex_core::auth::logout;
use codex_core::config::Config;
@@ -8,6 +9,7 @@ use codex_core::config::ConfigOverrides;
use codex_login::ServerOptions;
use codex_login::run_login_server;
use codex_protocol::mcp_protocol::AuthMode;
use std::env;
use std::path::PathBuf;
pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> {
@@ -58,11 +60,19 @@ pub async fn run_login_with_api_key(
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match CodexAuth::from_codex_home(&config.codex_home) {
match CodexAuth::from_codex_home(&config.codex_home, config.preferred_auth_method) {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => match auth.get_token().await {
Ok(api_key) => {
eprintln!("Logged in using an API key - {}", safe_format_key(&api_key));
if let Ok(env_api_key) = env::var(OPENAI_API_KEY_ENV_VAR)
&& env_api_key == api_key
{
eprintln!(
" API loaded from OPENAI_API_KEY environment variable or .env file"
);
}
std::process::exit(0);
}
Err(e) => {

View File

@@ -37,8 +37,10 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
// Use conversation_manager API to start a conversation
let conversation_manager =
ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
let conversation_manager = ConversationManager::new(AuthManager::shared(
config.codex_home.clone(),
config.preferred_auth_method,
));
let NewConversation {
conversation_id: _,
conversation,

View File

@@ -70,9 +70,13 @@ impl CodexAuth {
Ok(access)
}
/// Loads the available auth information from the auth.json.
pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
load_auth(codex_home)
/// Loads the available auth information from the auth.json or
/// OPENAI_API_KEY environment variable.
pub fn from_codex_home(
codex_home: &Path,
preferred_auth_method: AuthMode,
) -> std::io::Result<Option<CodexAuth>> {
load_auth(codex_home, true, preferred_auth_method)
}
pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
@@ -189,11 +193,10 @@ impl CodexAuth {
pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
pub fn read_openai_api_key_from_env() -> Option<String> {
fn read_openai_api_key_from_env() -> Option<String> {
env::var(OPENAI_API_KEY_ENV_VAR)
.ok()
.map(|value| value.trim().to_string())
.filter(|value| !value.is_empty())
.filter(|s| !s.is_empty())
}
pub fn get_auth_file(codex_home: &Path) -> PathBuf {
@@ -211,7 +214,7 @@ pub fn logout(codex_home: &Path) -> std::io::Result<bool> {
}
}
/// Writes an `auth.json` that contains only the API key.
/// Writes an `auth.json` that contains only the API key. Intended for CLI use.
pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<()> {
let auth_dot_json = AuthDotJson {
openai_api_key: Some(api_key.to_string()),
@@ -221,11 +224,28 @@ pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<(
write_auth_json(&get_auth_file(codex_home), &auth_dot_json)
}
fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
fn load_auth(
codex_home: &Path,
include_env_var: bool,
preferred_auth_method: AuthMode,
) -> std::io::Result<Option<CodexAuth>> {
// First, check to see if there is a valid auth.json file. If not, we fall
// back to AuthMode::ApiKey using the OPENAI_API_KEY environment variable
// (if it is set).
let auth_file = get_auth_file(codex_home);
let client = crate::default_client::create_client();
let auth_dot_json = match try_read_auth_json(&auth_file) {
Ok(auth) => auth,
// If auth.json does not exist, try to read the OPENAI_API_KEY from the
// environment variable.
Err(e) if e.kind() == std::io::ErrorKind::NotFound && include_env_var => {
return match read_openai_api_key_from_env() {
Some(api_key) => Ok(Some(CodexAuth::from_api_key_with_client(&api_key, client))),
None => Ok(None),
};
}
// Though if auth.json exists but is malformed, do not fall back to the
// env var because the user may be expecting to use AuthMode::ChatGPT.
Err(e) => {
return Err(e);
}
@@ -237,11 +257,32 @@ fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
last_refresh,
} = auth_dot_json;
// Prefer AuthMode.ApiKey if it's set in the auth.json.
// If the auth.json has an API key AND does not appear to be on a plan that
// should prefer AuthMode::ChatGPT, use AuthMode::ApiKey.
if let Some(api_key) = &auth_json_api_key {
return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
// Should any of these be AuthMode::ChatGPT with the api_key set?
// Does AuthMode::ChatGPT indicate that there is an auth.json that is
// "refreshable" even if we are using the API key for auth?
match &tokens {
Some(tokens) => {
if tokens.should_use_api_key(preferred_auth_method, tokens.is_openai_email()) {
return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
} else {
// Ignore the API key and fall through to ChatGPT auth.
}
}
None => {
// We have an API key but no tokens in the auth.json file.
// Perhaps the user ran `codex login --api-key <KEY>` or updated
// auth.json by hand. Either way, let's assume they are trying
// to use their API key.
return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client)));
}
}
}
// For the AuthMode::ChatGPT variant, perhaps neither api_key nor
// openai_api_key should exist?
Ok(Some(CodexAuth {
api_key: None,
mode: AuthMode::ChatGPT,
@@ -371,6 +412,7 @@ use std::sync::RwLock;
/// Internal cached auth state.
#[derive(Clone, Debug)]
struct CachedAuth {
preferred_auth_mode: AuthMode,
auth: Option<CodexAuth>,
}
@@ -426,7 +468,9 @@ mod tests {
auth_dot_json,
auth_file: _,
..
} = super::load_auth(codex_home.path()).unwrap().unwrap();
} = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
.unwrap()
.unwrap();
assert_eq!(None, api_key);
assert_eq!(AuthMode::ChatGPT, mode);
@@ -455,6 +499,88 @@ mod tests {
)
}
/// Even if the OPENAI_API_KEY is set in auth.json, if the plan is not one
/// that should use an API key (see `is_plan_that_should_use_api_key`), it
/// should use [`AuthMode::ChatGPT`].
#[tokio::test]
async fn pro_account_with_api_key_still_uses_chatgpt_auth() {
let codex_home = tempdir().unwrap();
let fake_jwt = write_auth_file(
AuthFileParams {
openai_api_key: Some("sk-test-key".to_string()),
chatgpt_plan_type: "pro".to_string(),
},
codex_home.path(),
)
.expect("failed to write auth file");
let CodexAuth {
api_key,
mode,
auth_dot_json,
auth_file: _,
..
} = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
.unwrap()
.unwrap();
assert_eq!(None, api_key);
assert_eq!(AuthMode::ChatGPT, mode);
let guard = auth_dot_json.lock().unwrap();
let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist");
assert_eq!(
&AuthDotJson {
openai_api_key: None,
tokens: Some(TokenData {
id_token: IdTokenInfo {
email: Some("user@example.com".to_string()),
chatgpt_plan_type: Some(PlanType::Known(KnownPlan::Pro)),
raw_jwt: fake_jwt,
},
access_token: "test-access-token".to_string(),
refresh_token: "test-refresh-token".to_string(),
account_id: None,
}),
last_refresh: Some(
DateTime::parse_from_rfc3339(LAST_REFRESH)
.unwrap()
.with_timezone(&Utc)
),
},
auth_dot_json
)
}
/// If the OPENAI_API_KEY is set in auth.json and it is an enterprise
/// account, then it should use [`AuthMode::ApiKey`].
#[tokio::test]
async fn enterprise_account_with_api_key_uses_apikey_auth() {
let codex_home = tempdir().unwrap();
write_auth_file(
AuthFileParams {
openai_api_key: Some("sk-test-key".to_string()),
chatgpt_plan_type: "enterprise".to_string(),
},
codex_home.path(),
)
.expect("failed to write auth file");
let CodexAuth {
api_key,
mode,
auth_dot_json,
auth_file: _,
..
} = super::load_auth(codex_home.path(), false, AuthMode::ChatGPT)
.unwrap()
.unwrap();
assert_eq!(Some("sk-test-key".to_string()), api_key);
assert_eq!(AuthMode::ApiKey, mode);
let guard = auth_dot_json.lock().expect("should unwrap");
assert!(guard.is_none(), "auth_dot_json should be None");
}
#[tokio::test]
async fn loads_api_key_from_auth_json() {
let dir = tempdir().unwrap();
@@ -465,7 +591,9 @@ mod tests {
)
.unwrap();
let auth = super::load_auth(dir.path()).unwrap().unwrap();
let auth = super::load_auth(dir.path(), false, AuthMode::ChatGPT)
.unwrap()
.unwrap();
assert_eq!(auth.mode, AuthMode::ApiKey);
assert_eq!(auth.api_key, Some("sk-test-key".to_string()));
@@ -555,17 +683,26 @@ impl AuthManager {
/// preferred auth method. Errors loading auth are swallowed; `auth()` will
/// simply return `None` in that case so callers can treat it as an
/// unauthenticated state.
pub fn new(codex_home: PathBuf) -> Self {
let auth = CodexAuth::from_codex_home(&codex_home).ok().flatten();
pub fn new(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Self {
let auth = CodexAuth::from_codex_home(&codex_home, preferred_auth_mode)
.ok()
.flatten();
Self {
codex_home,
inner: RwLock::new(CachedAuth { auth }),
inner: RwLock::new(CachedAuth {
preferred_auth_mode,
auth,
}),
}
}
/// Create an AuthManager with a specific CodexAuth, for testing only.
pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> {
let cached = CachedAuth { auth: Some(auth) };
let preferred_auth_mode = auth.mode;
let cached = CachedAuth {
preferred_auth_mode,
auth: Some(auth),
};
Arc::new(Self {
codex_home: PathBuf::new(),
inner: RwLock::new(cached),
@@ -577,10 +714,21 @@ impl AuthManager {
self.inner.read().ok().and_then(|c| c.auth.clone())
}
/// Force a reload of the auth information from auth.json. Returns
/// Preferred auth method used when (re)loading.
pub fn preferred_auth_method(&self) -> AuthMode {
self.inner
.read()
.map(|c| c.preferred_auth_mode)
.unwrap_or(AuthMode::ApiKey)
}
/// Force a reload using the existing preferred auth method. Returns
/// whether the auth value changed.
pub fn reload(&self) -> bool {
let new_auth = CodexAuth::from_codex_home(&self.codex_home).ok().flatten();
let preferred = self.preferred_auth_method();
let new_auth = CodexAuth::from_codex_home(&self.codex_home, preferred)
.ok()
.flatten();
if let Ok(mut guard) = self.inner.write() {
let changed = !AuthManager::auths_equal(&guard.auth, &new_auth);
guard.auth = new_auth;
@@ -599,8 +747,8 @@ impl AuthManager {
}
/// Convenience constructor returning an `Arc` wrapper.
pub fn shared(codex_home: PathBuf) -> Arc<Self> {
Arc::new(Self::new(codex_home))
pub fn shared(codex_home: PathBuf, preferred_auth_mode: AuthMode) -> Arc<Self> {
Arc::new(Self::new(codex_home, preferred_auth_mode))
}
/// Attempt to refresh the current auth token (if any). On success, reload

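Taken together, the new `load_auth` precedence is: a readable auth.json wins; a missing auth.json falls back to the OPENAI_API_KEY environment variable; a malformed auth.json is a hard error rather than a fallback; and within auth.json, an API key is only used when `should_use_api_key` says so. A minimal caller-side sketch of the changed public surface, assuming the crate layout in this diff (`codex_core::CodexAuth`, `codex_core::AuthManager`, `codex_protocol::mcp_protocol::AuthMode`) and a hypothetical codex home path:

```rust
use std::path::PathBuf;

use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_protocol::mcp_protocol::AuthMode;

fn main() -> std::io::Result<()> {
    let codex_home = PathBuf::from("/home/user/.codex"); // hypothetical path

    // Callers now pass the preferred auth method explicitly; per the new
    // load_auth, it only acts as a tie-breaker when auth.json holds both an
    // API key and ChatGPT tokens.
    match CodexAuth::from_codex_home(&codex_home, AuthMode::ChatGPT)? {
        Some(auth) => println!("resolved auth mode: {:?}", auth.mode),
        None => println!("unauthenticated (no auth.json, no OPENAI_API_KEY)"),
    }

    // Long-lived components share an AuthManager that remembers the
    // preference, so reload() resolves the same way as the initial load.
    let manager = AuthManager::shared(codex_home, AuthMode::ChatGPT);
    let _changed = manager.reload();
    Ok(())
}
```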
View File

@@ -468,7 +468,6 @@ impl Session {
tools_config: ToolsConfig::new(&ToolsConfigParams {
model_family: &config.model_family,
approval_policy,
sandbox_policy: sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -1148,7 +1147,6 @@ async fn submission_loop(
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &effective_family,
approval_policy: new_approval_policy,
sandbox_policy: new_sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -1186,26 +1184,18 @@ async fn submission_loop(
{
warn!("failed to persist overrides: {e:#}");
}
if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() {
sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
cwd,
approval_policy,
sandbox_policy,
// Shell is not configurable from turn to turn
None,
))])
.await;
}
}
Op::UserInput { items } => {
// attempt to inject input into current task
if let Err(items) = sess.inject_input(items) {
// no current task, spawn a new one
let task =
AgentTask::spawn(sess.clone(), Arc::clone(&turn_context), sub.id, items);
sess.set_task(task);
}
submit_user_input(
turn_context.cwd.clone(),
turn_context.approval_policy,
turn_context.sandbox_policy.clone(),
&sess,
&turn_context,
sub.id.clone(),
items,
)
.await;
}
Op::UserTurn {
items,
@@ -1250,7 +1240,6 @@ async fn submission_loop(
tools_config: ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy,
sandbox_policy: sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -1267,11 +1256,16 @@ async fn submission_loop(
shell_environment_policy: turn_context.shell_environment_policy.clone(),
cwd,
};
// TODO: record the new environment context in the conversation history
// no current task, spawn a new one with the per-turn context
let task =
AgentTask::spawn(sess.clone(), Arc::new(fresh_turn_context), sub.id, items);
sess.set_task(task);
submit_user_input(
fresh_turn_context.cwd.clone(),
fresh_turn_context.approval_policy,
fresh_turn_context.sandbox_policy.clone(),
&sess,
&Arc::new(fresh_turn_context),
sub.id.clone(),
items,
)
.await;
}
}
Op::ExecApproval { id, decision } => match decision {
@@ -2833,6 +2827,30 @@ async fn handle_sandbox_error(
}
}
async fn submit_user_input(
cwd: PathBuf,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
sess: &Arc<Session>,
turn_context: &Arc<TurnContext>,
sub_id: String,
items: Vec<InputItem>,
) {
sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
Some(cwd),
Some(approval_policy),
Some(sandbox_policy),
// Shell is not configurable from turn to turn
None,
))])
.await;
if let Err(items) = sess.inject_input(items) {
// no current task, spawn a new one
let task = AgentTask::spawn(Arc::clone(sess), Arc::clone(turn_context), sub_id, items);
sess.set_task(task);
}
}
fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
let ExecToolCallOutput {
aggregated_output, ..

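The net effect of this refactor is that both `Op::UserInput` and `Op::UserTurn` now funnel through `submit_user_input`, which records a fresh environment-context message ahead of the input itself on every submission. A minimal sketch of what that recorded item serializes to, based on the expectations in the prompt tests later in this diff (the helper below is a hypothetical stand-in for `EnvironmentContext`; exact indentation may differ):

```rust
// Hypothetical mirror of the per-turn EnvironmentContext rendering; the
// shell tag is omitted because shell is not configurable turn to turn.
fn render_env_context(
    cwd: &str,
    approval_policy: &str,
    sandbox_mode: &str,
    network_access: &str,
) -> String {
    format!(
        "<environment_context>\n  <cwd>{cwd}</cwd>\n  <approval_policy>{approval_policy}</approval_policy>\n  <sandbox_mode>{sandbox_mode}</sandbox_mode>\n  <network_access>{network_access}</network_access>\n</environment_context>"
    )
}

fn main() {
    // One of these is recorded into conversation history before each
    // user input is injected or a new task is spawned.
    println!(
        "{}",
        render_env_context("/repo", "on-request", "read-only", "restricted")
    );
}
```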
View File

@@ -19,6 +19,7 @@ use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode;
use codex_protocol::config_types::Verbosity;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::Tools;
use codex_protocol::mcp_protocol::UserSavedConfig;
use dirs::home_dir;
@@ -166,6 +167,9 @@ pub struct Config {
pub tools_web_search_request: bool,
/// Preferred authentication method to use when both ChatGPT tokens and an
/// API key are available.
pub preferred_auth_method: AuthMode,
pub use_experimental_streamable_shell_tool: bool,
/// If set to `true`, use only the experimental unified exec tool.
@@ -490,6 +494,9 @@ pub struct ConfigToml {
pub projects: Option<HashMap<String, ProjectConfig>>,
/// Preferred authentication method to use when both ChatGPT tokens and an
/// API key are available.
pub preferred_auth_method: Option<AuthMode>,
/// Nested tools section for feature toggles
pub tools: Option<ToolsToml>,
@@ -830,12 +837,13 @@ impl Config {
include_plan_tool: include_plan_tool.unwrap_or(false),
include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false),
tools_web_search_request,
preferred_auth_method: cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT),
use_experimental_streamable_shell_tool: cfg
.experimental_use_exec_command_tool
.unwrap_or(false),
use_experimental_unified_exec_tool: cfg
.experimental_use_unified_exec_tool
.unwrap_or(false),
.unwrap_or(true),
include_view_image_tool,
active_profile: active_profile_name,
disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false),
@@ -1209,8 +1217,9 @@ model_verbosity = "high"
include_plan_tool: false,
include_apply_patch_tool: false,
tools_web_search_request: false,
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_unified_exec_tool: true,
include_view_image_tool: true,
active_profile: Some("o3".to_string()),
disable_paste_burst: false,
@@ -1266,8 +1275,9 @@ model_verbosity = "high"
include_plan_tool: false,
include_apply_patch_tool: false,
tools_web_search_request: false,
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_unified_exec_tool: true,
include_view_image_tool: true,
active_profile: Some("gpt3".to_string()),
disable_paste_burst: false,
@@ -1338,8 +1348,9 @@ model_verbosity = "high"
include_plan_tool: false,
include_apply_patch_tool: false,
tools_web_search_request: false,
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_unified_exec_tool: true,
include_view_image_tool: true,
active_profile: Some("zdr".to_string()),
disable_paste_burst: false,
@@ -1396,8 +1407,9 @@ model_verbosity = "high"
include_plan_tool: false,
include_apply_patch_tool: false,
tools_web_search_request: false,
preferred_auth_method: AuthMode::ChatGPT,
use_experimental_streamable_shell_tool: false,
use_experimental_unified_exec_tool: false,
use_experimental_unified_exec_tool: true,
include_view_image_tool: true,
active_profile: Some("gpt5".to_string()),
disable_paste_burst: false,

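Two defaults move in this file: `preferred_auth_method` falls back to `AuthMode::ChatGPT` when unset, and `use_experimental_unified_exec_tool` now defaults to `true`. A self-contained sketch of the same `Option::unwrap_or` pattern (the `Partial` struct below is a hypothetical stand-in for `ConfigToml`):

```rust
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
enum AuthMode {
    ChatGPT,
    ApiKey,
}

// Hypothetical stand-in for the deserialized, all-optional ConfigToml fields.
struct Partial {
    preferred_auth_method: Option<AuthMode>,
    experimental_use_unified_exec_tool: Option<bool>,
}

fn main() {
    let cfg = Partial {
        preferred_auth_method: None,
        experimental_use_unified_exec_tool: None,
    };
    // Mirrors Config::load_with_cli_overrides: unset values take the new defaults.
    let preferred = cfg.preferred_auth_method.unwrap_or(AuthMode::ChatGPT);
    let unified_exec = cfg.experimental_use_unified_exec_tool.unwrap_or(true);
    assert_eq!(preferred, AuthMode::ChatGPT);
    assert!(unified_exec);
}
```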
View File

@@ -80,10 +80,7 @@ pub struct ModelProviderInfo {
/// the connection as lost.
pub stream_idle_timeout_ms: Option<u64>,
/// Does this provider require an OpenAI API Key or ChatGPT login token? If true,
/// user is presented with login screen on first run, and login preference and token/key
/// are stored in auth.json. If false (which is the default), login screen is skipped,
/// and API key (if needed) comes from the "env_key" environment variable.
/// Whether this provider requires some form of standard authentication (API key, ChatGPT token).
#[serde(default)]
pub requires_openai_auth: bool,
}

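Since the field keeps `#[serde(default)]`, provider definitions that never mention `requires_openai_auth` continue to deserialize with `false` and skip the login step. A minimal sketch under that assumption (the two-field `Provider` struct is a hypothetical mirror of `ModelProviderInfo`, not its full shape):

```rust
use serde::Deserialize;

// Hypothetical two-field mirror of ModelProviderInfo for illustration.
#[derive(Deserialize, Debug)]
struct Provider {
    name: String,
    #[serde(default)]
    requires_openai_auth: bool,
}

fn main() {
    let p: Provider = serde_json::from_str(r#"{ "name": "custom" }"#).unwrap();
    assert!(!p.requires_openai_auth); // omitted => false => no login screen
    println!("{p:?}");
}
```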
View File

@@ -8,7 +8,6 @@ use std::collections::HashMap;
use crate::model_family::ModelFamily;
use crate::plan_tool::PLAN_TOOL;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -58,7 +57,7 @@ pub(crate) enum OpenAiTool {
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
DefaultShell,
ShellWithRequest { sandbox_policy: SandboxPolicy },
ShellWithRequest,
LocalShell,
StreamableShell,
}
@@ -76,7 +75,6 @@ pub(crate) struct ToolsConfig {
pub(crate) struct ToolsConfigParams<'a> {
pub(crate) model_family: &'a ModelFamily,
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,
pub(crate) include_plan_tool: bool,
pub(crate) include_apply_patch_tool: bool,
pub(crate) include_web_search_request: bool,
@@ -90,7 +88,6 @@ impl ToolsConfig {
let ToolsConfigParams {
model_family,
approval_policy,
sandbox_policy,
include_plan_tool,
include_apply_patch_tool,
include_web_search_request,
@@ -106,9 +103,7 @@ impl ToolsConfig {
ConfigShellToolType::DefaultShell
};
if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
shell_type = ConfigShellToolType::ShellWithRequest {
sandbox_policy: sandbox_policy.clone(),
}
shell_type = ConfigShellToolType::ShellWithRequest;
}
let apply_patch_tool_type = match model_family.apply_patch_tool_type {
@@ -251,7 +246,9 @@ fn create_unified_exec_tool() -> OpenAiTool {
})
}
fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
const SHELL_TOOL_DESCRIPTION: &str = r#"Runs a shell command and returns its output"#;
fn create_shell_tool_for_sandbox() -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
@@ -263,79 +260,29 @@ fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("The working directory to execute the command in".to_string()),
description: Some("Working directory to execute the command in.".to_string()),
},
);
properties.insert(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some("The timeout for the command in milliseconds".to_string()),
description: Some("Timeout for the command in milliseconds.".to_string()),
},
);
if matches!(sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }) {
properties.insert(
properties.insert(
"with_escalated_permissions".to_string(),
JsonSchema::Boolean {
description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
description: Some("Request escalated permissions, only for when a command would otherwise be blocked by the sandbox.".to_string()),
},
);
properties.insert(
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
description: Some("Required if and only if with_escalated_permissions == true. One sentence explaining why escalation is needed (e.g., write outside CWD, network fetch, git commit).".to_string()),
},
);
}
let description = match sandbox_policy {
SandboxPolicy::WorkspaceWrite {
network_access,
..
} => {
let network_line = if !network_access {
"\n - Commands that require network access"
} else {
""
};
format!(
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories){network_line}
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#,
)
}
SandboxPolicy::DangerFullAccess => {
"Runs a shell command and returns its output.".to_string()
}
SandboxPolicy::ReadOnly => {
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#.to_string()
}
};
let description = SHELL_TOOL_DESCRIPTION.to_string();
OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
@@ -348,7 +295,6 @@ The shell tool is used to execute shell commands.
},
})
}
fn create_view_image_tool() -> OpenAiTool {
// Support only local filesystem path.
let mut properties = BTreeMap::new();
@@ -589,8 +535,8 @@ pub(crate) fn get_openai_tools(
ConfigShellToolType::DefaultShell => {
tools.push(create_shell_tool());
}
ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
tools.push(create_shell_tool_for_sandbox(sandbox_policy));
ConfigShellToolType::ShellWithRequest => {
tools.push(create_shell_tool_for_sandbox());
}
ConfigShellToolType::LocalShell => {
tools.push(OpenAiTool::LocalShell {});
@@ -686,7 +632,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -708,7 +653,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -730,7 +674,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -836,7 +779,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: false,
@@ -914,7 +856,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -977,7 +918,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1035,7 +975,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1096,7 +1035,6 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1150,13 +1088,7 @@ mod tests {
#[test]
fn test_shell_tool_for_sandbox_workspace_write() {
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: vec!["workspace".into()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
};
let tool = super::create_shell_tool_for_sandbox(&sandbox_policy);
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1165,26 +1097,13 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories)
- Commands that require network access
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#;
let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}
#[test]
fn test_shell_tool_for_sandbox_readonly() {
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::ReadOnly);
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1193,27 +1112,13 @@ The shell tool is used to execute shell commands.
};
assert_eq!(name, "shell");
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#;
let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}
#[test]
fn test_shell_tool_for_sandbox_danger_full_access() {
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::DangerFullAccess);
let tool = super::create_shell_tool_for_sandbox();
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1222,6 +1127,7 @@ The shell tool is used to execute shell commands.
};
assert_eq!(name, "shell");
assert_eq!(description, "Runs a shell command and returns its output.");
let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
}
}

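With the sandbox-specific branches gone, the `shell` function tool now advertises a single schema regardless of policy, and `with_escalated_permissions`/`justification` are always present. A hedged sketch of the resulting tool payload, with property names and descriptions taken from the `properties` map above; the envelope keys (`type`, `parameters`, `required`) are assumptions, since `ResponsesApiTool`'s wire format is not shown in this diff:

```rust
use serde_json::json;

fn main() {
    // Assumed wire shape of the unified shell tool definition.
    let shell_tool = json!({
        "type": "function",
        "name": "shell",
        "description": "Runs a shell command and returns its output",
        "parameters": {
            "type": "object",
            "properties": {
                "command": { "type": "array", "items": { "type": "string" } },
                "workdir": {
                    "type": "string",
                    "description": "Working directory to execute the command in."
                },
                "timeout_ms": {
                    "type": "number",
                    "description": "Timeout for the command in milliseconds."
                },
                "with_escalated_permissions": { "type": "boolean" },
                "justification": { "type": "string" }
            },
            "required": ["command"]
        }
    });
    println!("{}", serde_json::to_string_pretty(&shell_tool).unwrap());
}
```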
View File

@@ -3,6 +3,8 @@ use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;
use codex_protocol::mcp_protocol::AuthMode;
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Default)]
pub struct TokenData {
/// Flat info parsed from the JWT in auth.json.
@@ -20,6 +22,36 @@ pub struct TokenData {
pub account_id: Option<String>,
}
impl TokenData {
/// Returns true if this auth should use the traditional "metered" billing
/// via an API key, given the preferred auth method, account email, and plan type.
pub(crate) fn should_use_api_key(
&self,
preferred_auth_method: AuthMode,
is_openai_email: bool,
) -> bool {
if preferred_auth_method == AuthMode::ApiKey {
return true;
}
// If the email is an OpenAI email, use AuthMode::ChatGPT unless preferred_auth_method is AuthMode::ApiKey.
if is_openai_email {
return false;
}
self.id_token
.chatgpt_plan_type
.as_ref()
.is_none_or(|plan| plan.is_plan_that_should_use_api_key())
}
pub fn is_openai_email(&self) -> bool {
self.id_token
.email
.as_deref()
.is_some_and(|email| email.trim().to_ascii_lowercase().ends_with("@openai.com"))
}
}
/// Flat subset of useful claims in id_token from auth.json.
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
pub struct IdTokenInfo {
@@ -48,6 +80,19 @@ pub(crate) enum PlanType {
}
impl PlanType {
fn is_plan_that_should_use_api_key(&self) -> bool {
match self {
Self::Known(known) => {
use KnownPlan::*;
!matches!(known, Free | Plus | Pro | Team)
}
Self::Unknown(_) => {
// Unknown plans should use the API key.
true
}
}
}
pub fn as_string(&self) -> String {
match self {
Self::Known(known) => format!("{known:?}").to_lowercase(),

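The interaction of these three inputs reduces to a small decision table: an explicit `AuthMode::ApiKey` preference always wins; `@openai.com` accounts otherwise stay on ChatGPT; and only the known Free/Plus/Pro/Team plans prefer ChatGPT, while unknown plans or a missing plan claim fall back to the API key. A standalone sketch of that table (the free function below mirrors `TokenData::should_use_api_key`, which is crate-private; `plan_uses_api_key` stands in for the mapped `chatgpt_plan_type` check):

```rust
#[derive(Clone, Copy, PartialEq)]
enum AuthMode {
    ChatGPT,
    ApiKey,
}

// Standalone mirror of TokenData::should_use_api_key for illustration.
fn should_use_api_key(
    preferred: AuthMode,
    is_openai_email: bool,
    plan_uses_api_key: Option<bool>,
) -> bool {
    if preferred == AuthMode::ApiKey {
        return true;
    }
    if is_openai_email {
        return false;
    }
    // `None` (no plan claim) and unknown plans fall back to the API key.
    plan_uses_api_key.unwrap_or(true)
}

fn main() {
    assert!(should_use_api_key(AuthMode::ApiKey, true, Some(false))); // explicit preference wins
    assert!(!should_use_api_key(AuthMode::ChatGPT, true, Some(true))); // openai.com email stays on ChatGPT
    assert!(!should_use_api_key(AuthMode::ChatGPT, false, Some(false))); // free/plus/pro/team => ChatGPT
    assert!(should_use_api_key(AuthMode::ChatGPT, false, Some(true))); // e.g. enterprise => API key
    assert!(should_use_api_key(AuthMode::ChatGPT, false, None)); // no plan claim => API key
}
```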
View File

@@ -8,6 +8,7 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_protocol::mcp_protocol::AuthMode;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::wait_for_event;
@@ -276,7 +277,25 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
"content": [{ "type": "input_text", "text": "hello" }]
}
]);
assert_eq!(request_body["input"], expected_input);
let input_array = request_body
.get("input")
.and_then(|v| v.as_array())
.cloned()
.expect("input array in request body");
let filtered: Vec<serde_json::Value> = input_array
.into_iter()
.filter(|item| {
let text = item
.get("content")
.and_then(|c| c.as_array())
.and_then(|a| a.first())
.and_then(|o| o.get("text"))
.and_then(|t| t.as_str())
.unwrap_or("");
!text.contains("<environment_context>")
})
.collect();
assert_eq!(serde_json::json!(filtered), expected_input);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -488,6 +507,79 @@ async fn chatgpt_auth_sends_correct_request() {
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_chatgpt_token_when_config_prefers_chatgpt() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
// Expect ChatGPT base path and correct headers
Mock::given(method("POST"))
.and(path("/v1/responses"))
.and(header_regex("Authorization", r"Bearer Access-123"))
.and(header_regex("chatgpt-account-id", r"acc-123"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
// Write auth.json that contains both API key and ChatGPT tokens for a plan that should prefer ChatGPT.
let _jwt = write_auth_json(
&codex_home,
Some("sk-test-key"),
"pro",
"Access-123",
Some("acc-123"),
);
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ChatGPT;
let auth_manager =
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let conversation_manager = ConversationManager::new(auth_manager);
let NewConversation {
conversation: codex,
..
} = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation");
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
@@ -532,12 +624,14 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.preferred_auth_method = AuthMode::ApiKey;
let auth_manager = match CodexAuth::from_codex_home(codex_home.path()) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let auth_manager =
match CodexAuth::from_codex_home(codex_home.path(), config.preferred_auth_method) {
Ok(Some(auth)) => codex_core::AuthManager::from_auth_for_testing(auth),
Ok(None) => panic!("No CodexAuth found in codex_home"),
Err(e) => panic!("Failed to load CodexAuth: {e}"),
};
let conversation_manager = ConversationManager::new(auth_manager);
let NewConversation {
conversation: codex,
@@ -874,34 +968,6 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
assert_eq!(requests.len(), 3, "expected 3 requests (one per turn)");
// Replace full-array compare with tail-only raw JSON compare using a single hard-coded value.
let r3_tail_expected = json!([
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U1"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U2"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U3"}]
}
]);
let r3_input_array = requests[2]
.body_json::<serde_json::Value>()
.unwrap()
@@ -909,12 +975,60 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
.and_then(|v| v.as_array())
.cloned()
.expect("r3 missing input array");
// skipping earlier context and developer messages
let tail_len = r3_tail_expected.as_array().unwrap().len();
let actual_tail = &r3_input_array[r3_input_array.len() - tail_len..];
// We only assert on the last 5 items of the input history for request 3.
// With per-turn environment context injected, the last 5 should be:
// [env_ctx, U2, assistant("Hey there!\n"), env_ctx, U3]
let actual_tail = &r3_input_array[r3_input_array.len() - 5..];
// env_ctx 1
assert_eq!(actual_tail[0]["type"], serde_json::json!("message"));
assert_eq!(actual_tail[0]["role"], serde_json::json!("user"));
let env_text_1 = &actual_tail[0]["content"][0]["text"];
assert!(
env_text_1
.as_str()
.expect("env text should be string")
.contains("<environment_context>")
);
// U2
assert_eq!(
serde_json::Value::Array(actual_tail.to_vec()),
r3_tail_expected,
"request 3 tail mismatch",
actual_tail[1],
serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": "U2" } ]
})
);
// assistant response
assert_eq!(
actual_tail[2],
serde_json::json!({
"type": "message",
"role": "assistant",
"content": [ { "type": "output_text", "text": "Hey there!\n" } ]
})
);
// env_ctx 2
assert_eq!(actual_tail[3]["type"], serde_json::json!("message"));
assert_eq!(actual_tail[3]["role"], serde_json::json!("user"));
let env_text_2 = &actual_tail[3]["content"][0]["text"];
assert!(
env_text_2
.as_str()
.expect("env text should be string")
.contains("<environment_context>")
);
// U3
assert_eq!(
actual_tail[4],
serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": "U3" } ]
})
);
}

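Because per-turn environment context now lands in the request input, these assertions filter it out before comparing against the expected user/assistant turns. The inline filter used above could be lifted into a small helper; a sketch of that extraction, using the exact predicate from the test:

```rust
use serde_json::Value;

// Possible helper: drop any message whose first content item embeds the
// <environment_context> block, keeping assertions focused on real turns.
fn strip_env_context(items: Vec<Value>) -> Vec<Value> {
    items
        .into_iter()
        .filter(|item| {
            let text = item
                .get("content")
                .and_then(|c| c.as_array())
                .and_then(|a| a.first())
                .and_then(|o| o.get("text"))
                .and_then(|t| t.as_str())
                .unwrap_or("");
            !text.contains("<environment_context>")
        })
        .collect()
}
```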
View File

@@ -191,7 +191,8 @@ async fn prompt_tools_are_consistent_across_requests() {
let expected_instructions: &str = include_str!("../../prompt.md");
// our internal implementation is responsible for keeping tools in sync
// with the OpenAI schema, so we just verify the tool presence here
let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
let expected_tools_names: &[&str] =
&["unified_exec", "update_plan", "apply_patch", "view_image"];
let body0 = requests[0].body_json::<serde_json::Value>().unwrap();
assert_eq!(
body0["instructions"],
@@ -271,7 +272,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let shell = default_user_shell().await;
let expected_env_text = format!(
let expected_env_text_init = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
@@ -284,13 +285,28 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
None => String::new(),
}
);
// Per-turn environment context omits the shell tag.
let expected_env_text_turn = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
</environment_context>"#,
cwd.path().to_string_lossy(),
);
let expected_ui_text =
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
let expected_env_msg = serde_json::json!({
let expected_env_msg_init = serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text } ]
"content": [ { "type": "input_text", "text": expected_env_text_init } ]
});
let expected_env_msg_turn = serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_turn } ]
});
let expected_ui_msg = serde_json::json!({
"type": "message",
@@ -306,7 +322,12 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
assert_eq!(
body1["input"],
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
serde_json::json!([
expected_ui_msg,
expected_env_msg_init,
expected_env_msg_turn,
expected_user_message_1
])
);
let expected_user_message_2 = serde_json::json!({
@@ -318,7 +339,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_user_message_2].as_slice(),
[expected_env_msg_turn, expected_user_message_2].as_slice(),
]
.concat()
);
@@ -546,10 +567,24 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let expected_env_text_2 = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
</environment_context>"#,
new_cwd.path().to_string_lossy()
);
let expected_env_msg_2 = serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_2 } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_user_message_2].as_slice(),
[expected_env_msg_2, expected_user_message_2].as_slice(),
]
.concat()
);

View File

@@ -146,7 +146,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
model_provider,
codex_linux_sandbox_exe,
base_instructions: None,
include_plan_tool: Some(true),
include_plan_tool: None,
include_apply_patch_tool: None,
include_view_image_tool: None,
show_raw_agent_reasoning: oss.then_some(true),
@@ -187,8 +187,10 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
std::process::exit(1);
}
let conversation_manager =
ConversationManager::new(AuthManager::shared(config.codex_home.clone()));
let conversation_manager = ConversationManager::new(AuthManager::shared(
config.codex_home.clone(),
config.preferred_auth_method,
));
let NewConversation {
conversation_id: _,
conversation,

View File

@@ -12,7 +12,6 @@ use codex_core::RolloutRecorder;
use codex_core::SessionMeta;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::get_auth_file;
use codex_core::auth::login_with_api_key;
use codex_core::auth::try_read_auth_json;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
@@ -40,6 +39,7 @@ use codex_protocol::mcp_protocol::ApplyPatchApprovalParams;
use codex_protocol::mcp_protocol::ApplyPatchApprovalResponse;
use codex_protocol::mcp_protocol::ArchiveConversationParams;
use codex_protocol::mcp_protocol::ArchiveConversationResponse;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::AuthStatusChangeNotification;
use codex_protocol::mcp_protocol::ClientRequest;
use codex_protocol::mcp_protocol::ConversationId;
@@ -57,8 +57,6 @@ use codex_protocol::mcp_protocol::InterruptConversationParams;
use codex_protocol::mcp_protocol::InterruptConversationResponse;
use codex_protocol::mcp_protocol::ListConversationsParams;
use codex_protocol::mcp_protocol::ListConversationsResponse;
use codex_protocol::mcp_protocol::LoginApiKeyParams;
use codex_protocol::mcp_protocol::LoginApiKeyResponse;
use codex_protocol::mcp_protocol::LoginChatGptCompleteNotification;
use codex_protocol::mcp_protocol::LoginChatGptResponse;
use codex_protocol::mcp_protocol::NewConversationParams;
@@ -174,9 +172,6 @@ impl CodexMessageProcessor {
ClientRequest::GitDiffToRemote { request_id, params } => {
self.git_diff_to_origin(request_id, params.cwd).await;
}
ClientRequest::LoginApiKey { request_id, params } => {
self.login_api_key(request_id, params).await;
}
ClientRequest::LoginChatGpt { request_id } => {
self.login_chatgpt(request_id).await;
}
@@ -204,39 +199,6 @@ impl CodexMessageProcessor {
}
}
async fn login_api_key(&mut self, request_id: RequestId, params: LoginApiKeyParams) {
{
let mut guard = self.active_login.lock().await;
if let Some(active) = guard.take() {
active.drop();
}
}
match login_with_api_key(&self.config.codex_home, &params.api_key) {
Ok(()) => {
self.auth_manager.reload();
self.outgoing
.send_response(request_id, LoginApiKeyResponse {})
.await;
let payload = AuthStatusChangeNotification {
auth_method: self.auth_manager.auth().map(|auth| auth.mode),
};
self.outgoing
.send_server_notification(ServerNotification::AuthStatusChange(payload))
.await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to save api key: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn login_chatgpt(&mut self, request_id: RequestId) {
let config = self.config.as_ref();
@@ -390,7 +352,7 @@ impl CodexMessageProcessor {
.await;
// Send auth status change notification reflecting the current auth mode
// after logout.
// after logout (which may fall back to API key via env var).
let current_auth_method = self.auth_manager.auth().map(|auth| auth.mode);
let payload = AuthStatusChangeNotification {
auth_method: current_auth_method,
@@ -405,6 +367,7 @@ impl CodexMessageProcessor {
request_id: RequestId,
params: codex_protocol::mcp_protocol::GetAuthStatusParams,
) {
let preferred_auth_method: AuthMode = self.auth_manager.preferred_auth_method();
let include_token = params.include_token.unwrap_or(false);
let do_refresh = params.refresh_token.unwrap_or(false);
@@ -412,11 +375,6 @@ impl CodexMessageProcessor {
tracing::warn!("failed to refresh token while getting auth status: {err}");
}
// Determine whether auth is required based on the active model provider.
// If a custom provider is configured with `requires_openai_auth == false`,
// then no auth step is required; otherwise, default to requiring auth.
let requires_openai_auth = Some(self.config.model_provider.requires_openai_auth);
let response = match self.auth_manager.auth() {
Some(auth) => {
let (reported_auth_method, token_opt) = match auth.get_token().await {
@@ -432,14 +390,14 @@ impl CodexMessageProcessor {
};
codex_protocol::mcp_protocol::GetAuthStatusResponse {
auth_method: reported_auth_method,
preferred_auth_method,
auth_token: token_opt,
requires_openai_auth,
}
}
None => codex_protocol::mcp_protocol::GetAuthStatusResponse {
auth_method: None,
preferred_auth_method,
auth_token: None,
requires_openai_auth,
},
};

View File

@@ -56,7 +56,8 @@ impl MessageProcessor {
config: Arc<Config>,
) -> Self {
let outgoing = Arc::new(outgoing);
let auth_manager = AuthManager::shared(config.codex_home.clone());
let auth_manager =
AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
let conversation_manager = Arc::new(ConversationManager::new(auth_manager.clone()));
let codex_message_processor = CodexMessageProcessor::new(
auth_manager,

View File

@@ -18,7 +18,6 @@ use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
use codex_protocol::mcp_protocol::GetAuthStatusParams;
use codex_protocol::mcp_protocol::InterruptConversationParams;
use codex_protocol::mcp_protocol::ListConversationsParams;
use codex_protocol::mcp_protocol::LoginApiKeyParams;
use codex_protocol::mcp_protocol::NewConversationParams;
use codex_protocol::mcp_protocol::RemoveConversationListenerParams;
use codex_protocol::mcp_protocol::ResumeConversationParams;
@@ -319,15 +318,6 @@ impl McpProcess {
self.send_request("resumeConversation", params).await
}
/// Send a `loginApiKey` JSON-RPC request.
pub async fn send_login_api_key_request(
&mut self,
params: LoginApiKeyParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("loginApiKey", params).await
}
/// Send a `loginChatGpt` JSON-RPC request.
pub async fn send_login_chat_gpt_request(&mut self) -> anyhow::Result<i64> {
self.send_request("loginChatGpt", None).await

View File

@@ -1,10 +1,9 @@
use std::path::Path;
use codex_core::auth::login_with_api_key;
use codex_protocol::mcp_protocol::AuthMode;
use codex_protocol::mcp_protocol::GetAuthStatusParams;
use codex_protocol::mcp_protocol::GetAuthStatusResponse;
use codex_protocol::mcp_protocol::LoginApiKeyParams;
use codex_protocol::mcp_protocol::LoginApiKeyResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
@@ -37,29 +36,10 @@ stream_max_retries = 0
)
}
async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) {
let request_id = mcp
.send_login_api_key_request(LoginApiKeyParams {
api_key: api_key.to_string(),
})
.await
.unwrap_or_else(|e| panic!("send loginApiKey: {e}"));
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.unwrap_or_else(|e| panic!("loginApiKey timeout: {e}"))
.unwrap_or_else(|e| panic!("loginApiKey response: {e}"));
let _: LoginApiKeyResponse =
to_response(resp).unwrap_or_else(|e| panic!("deserialize login response: {e}"));
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_no_auth() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)])
.await
@@ -92,7 +72,8 @@ async fn get_auth_status_no_auth() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
create_config_toml(codex_home.path()).expect("write config.toml");
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
let mut mcp = McpProcess::new(codex_home.path())
.await
@@ -102,8 +83,6 @@ async fn get_auth_status_with_api_key() {
.expect("init timeout")
.expect("init failed");
login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
let request_id = mcp
.send_get_auth_status_request(GetAuthStatusParams {
include_token: Some(true),
@@ -122,12 +101,14 @@ async fn get_auth_status_with_api_key() {
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert_eq!(status.auth_token, Some("sk-test-key".to_string()));
assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_no_include_token() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
create_config_toml(codex_home.path()).expect("write config.toml");
login_with_api_key(codex_home.path(), "sk-test-key").expect("seed api key");
let mut mcp = McpProcess::new(codex_home.path())
.await
@@ -137,8 +118,6 @@ async fn get_auth_status_with_api_key_no_include_token() {
.expect("init timeout")
.expect("init failed");
login_with_api_key_via_request(&mut mcp, "sk-test-key").await;
// Build params via struct so None field is omitted in wire JSON.
let params = GetAuthStatusParams {
include_token: None,
@@ -159,4 +138,5 @@ async fn get_auth_status_with_api_key_no_include_token() {
let status: GetAuthStatusResponse = to_response(resp).expect("deserialize status");
assert_eq!(status.auth_method, Some(AuthMode::ApiKey));
assert!(status.auth_token.is_none(), "token must be omitted");
assert_eq!(status.preferred_auth_method, AuthMode::ChatGPT);
}

View File

@@ -1,7 +1,7 @@
use std::path::Path;
use std::time::Duration;
use codex_login::login_with_api_key;
use codex_core::auth::login_with_api_key;
use codex_protocol::mcp_protocol::CancelLoginChatGptParams;
use codex_protocol::mcp_protocol::CancelLoginChatGptResponse;
use codex_protocol::mcp_protocol::GetAuthStatusParams;
@@ -95,7 +95,7 @@ async fn logout_chatgpt_removes_auth() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn login_and_cancel_chatgpt() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
create_config_toml(codex_home.path()).unwrap_or_else(|err| panic!("write config.toml: {err}"));
create_config_toml(codex_home.path()).expect("write config.toml");
let mut mcp = McpProcess::new(codex_home.path())
.await

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env python3
import subprocess
import sys
from pathlib import Path
def main() -> int:
crate_dir = Path(__file__).resolve().parent
generator = crate_dir / "generate_mcp_types.py"
result = subprocess.run(
[sys.executable, str(generator), "--check"],
cwd=crate_dir,
check=False,
)
return result.returncode
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -5,19 +5,15 @@ import argparse
import json
import subprocess
import sys
import tempfile
from dataclasses import (
dataclass,
)
from difflib import unified_diff
from pathlib import Path
from shutil import copy2
# Helper first so it is defined when other functions call it.
from typing import Any, Literal
SCHEMA_VERSION = "2025-06-18"
JSONRPC_VERSION = "2.0"
@@ -47,31 +43,16 @@ def main() -> int:
default_schema_file = (
Path(__file__).resolve().parent / "schema" / SCHEMA_VERSION / "schema.json"
)
default_lib_rs = Path(__file__).resolve().parent / "src/lib.rs"
parser.add_argument(
"schema_file",
nargs="?",
default=default_schema_file,
help="schema.json file to process",
)
parser.add_argument(
"--check",
action="store_true",
help="Regenerate lib.rs in a sandbox and ensure the checked-in file matches",
)
args = parser.parse_args()
schema_file = Path(args.schema_file)
crate_dir = Path(__file__).resolve().parent
schema_file = args.schema_file
if args.check:
return run_check(schema_file, crate_dir, default_lib_rs)
generate_lib_rs(schema_file, default_lib_rs, fmt=True)
return 0
def generate_lib_rs(schema_file: Path, lib_rs: Path, fmt: bool) -> None:
lib_rs.parent.mkdir(parents=True, exist_ok=True)
lib_rs = Path(__file__).resolve().parent / "src/lib.rs"
global DEFINITIONS # Allow helper functions to access the schema.
@@ -136,7 +117,9 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
for req_name in CLIENT_REQUEST_TYPE_NAMES:
defn = definitions[req_name]
method_const = defn.get("properties", {}).get("method", {}).get("const", req_name)
method_const = (
defn.get("properties", {}).get("method", {}).get("const", req_name)
)
payload_type = f"<{req_name} as ModelContextProtocolRequest>::Params"
try_from_impl_lines.append(f' "{method_const}" => {{\n')
try_from_impl_lines.append(
@@ -145,7 +128,9 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
try_from_impl_lines.append(
f" let params: {payload_type} = serde_json::from_value(params_json)?;\n"
)
try_from_impl_lines.append(f" Ok(ClientRequest::{req_name}(params))\n")
try_from_impl_lines.append(
f" Ok(ClientRequest::{req_name}(params))\n"
)
try_from_impl_lines.append(" },\n")
try_from_impl_lines.append(
@@ -159,7 +144,9 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
# Generate TryFrom for ServerNotification
notif_impl_lines: list[str] = []
notif_impl_lines.append("impl TryFrom<JSONRPCNotification> for ServerNotification {\n")
notif_impl_lines.append(
"impl TryFrom<JSONRPCNotification> for ServerNotification {\n"
)
notif_impl_lines.append(" type Error = serde_json::Error;\n")
notif_impl_lines.append(
" fn try_from(n: JSONRPCNotification) -> std::result::Result<Self, Self::Error> {\n"
@@ -168,7 +155,9 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
for notif_name in SERVER_NOTIFICATION_TYPE_NAMES:
n_def = definitions[notif_name]
method_const = n_def.get("properties", {}).get("method", {}).get("const", notif_name)
method_const = (
n_def.get("properties", {}).get("method", {}).get("const", notif_name)
)
payload_type = f"<{notif_name} as ModelContextProtocolNotification>::Params"
notif_impl_lines.append(f' "{method_const}" => {{\n')
# params may be optional
@@ -178,7 +167,9 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
notif_impl_lines.append(
f" let params: {payload_type} = serde_json::from_value(params_json)?;\n"
)
notif_impl_lines.append(f" Ok(ServerNotification::{notif_name}(params))\n")
notif_impl_lines.append(
f" Ok(ServerNotification::{notif_name}(params))\n"
)
notif_impl_lines.append(" },\n")
notif_impl_lines.append(
@@ -194,70 +185,13 @@ fn default_jsonrpc() -> String {{ JSONRPC_VERSION.to_owned() }}
for chunk in out:
f.write(chunk)
if fmt:
subprocess.check_call(
["cargo", "fmt", "--", "--config", "imports_granularity=Item"],
cwd=lib_rs.parent.parent,
stderr=subprocess.DEVNULL,
)
subprocess.check_call(
["cargo", "fmt", "--", "--config", "imports_granularity=Item"],
cwd=lib_rs.parent.parent,
stderr=subprocess.DEVNULL,
)
def run_check(schema_file: Path, crate_dir: Path, checked_in_lib: Path) -> int:
config_path = crate_dir.parent / "rustfmt.toml"
eprint(f"Running --check with schema {schema_file}")
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
eprint(f"Created temporary workspace at {tmp_path}")
manifest_path = tmp_path / "Cargo.toml"
eprint(f"Copying Cargo.toml into {manifest_path}")
copy2(crate_dir / "Cargo.toml", manifest_path)
manifest_text = manifest_path.read_text(encoding="utf-8")
manifest_text = manifest_text.replace(
"version = { workspace = true }",
'version = "0.0.0"',
)
manifest_text = manifest_text.replace("\n[lints]\nworkspace = true\n", "\n")
manifest_path.write_text(manifest_text, encoding="utf-8")
src_dir = tmp_path / "src"
src_dir.mkdir(parents=True, exist_ok=True)
eprint(f"Generating lib.rs into {src_dir}")
generated_lib = src_dir / "lib.rs"
generate_lib_rs(schema_file, generated_lib, fmt=False)
eprint("Formatting generated lib.rs with rustfmt")
subprocess.check_call(
[
"rustfmt",
"--config-path",
str(config_path),
str(generated_lib),
],
cwd=tmp_path,
stderr=subprocess.DEVNULL,
)
eprint("Comparing generated lib.rs with checked-in version")
checked_in_contents = checked_in_lib.read_text(encoding="utf-8")
generated_contents = generated_lib.read_text(encoding="utf-8")
if checked_in_contents == generated_contents:
eprint("lib.rs matches checked-in version")
return 0
diff = unified_diff(
checked_in_contents.splitlines(keepends=True),
generated_contents.splitlines(keepends=True),
fromfile=str(checked_in_lib),
tofile=str(generated_lib),
)
diff_text = "".join(diff)
eprint("Generated lib.rs does not match the checked-in version. Diff:")
if diff_text:
eprint(diff_text, end="")
eprint("Re-run generate_mcp_types.py without --check to update src/lib.rs.")
return 1
return 0
def add_definition(name: str, definition: dict[str, Any], out: list[str]) -> None:
@@ -487,11 +421,15 @@ def define_untagged_enum(name: str, type_list: list[str], out: list[str]) -> Non
case "integer":
out.append(" Integer(i64),\n")
case _:
raise ValueError(f"Unknown type in untagged enum: {simple_type} in {name}")
raise ValueError(
f"Unknown type in untagged enum: {simple_type} in {name}"
)
out.append("}\n\n")
def define_any_of(name: str, list_of_refs: list[Any], description: str | None = None) -> list[str]:
def define_any_of(
name: str, list_of_refs: list[Any], description: str | None = None
) -> list[str]:
"""Generate a Rust enum for a JSON-Schema `anyOf` union.
For most types we simply map each `$ref` inside the `anyOf` list to a
@@ -556,7 +494,9 @@ def define_any_of(name: str, list_of_refs: list[Any], description: str | None =
if name == "ClientRequest":
payload_type = f"<{ref_name} as ModelContextProtocolRequest>::Params"
else:
payload_type = f"<{ref_name} as ModelContextProtocolNotification>::Params"
payload_type = (
f"<{ref_name} as ModelContextProtocolNotification>::Params"
)
# Determine the wire value for `method` so we can annotate the
# variant appropriately. If for some reason the schema does not
@@ -564,7 +504,9 @@ def define_any_of(name: str, list_of_refs: list[Any], description: str | None =
# least compile (although deserialization will likely fail).
request_def = DEFINITIONS.get(ref_name, {})
method_const = (
request_def.get("properties", {}).get("method", {}).get("const", ref_name)
request_def.get("properties", {})
.get("method", {})
.get("const", ref_name)
)
out.append(f' #[serde(rename = "{method_const}")]\n')
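For orientation, the union this branch emits ends up looking roughly like the sketch below: each `$ref` becomes a variant renamed to the schema's `method` const, carrying that request's params type. This is a hedged illustration, not the generator's exact output: the tagging scheme is assumed, params are collapsed to `serde_json::Value`, and only `initialize` and `ping` are real MCP method strings.

```rust
use serde::Serialize;

// Rough shape of a generated `anyOf` union: one variant per request,
// renamed to the schema's `method` const.
#[derive(Serialize)]
#[serde(tag = "method", content = "params")] // tagging scheme assumed
enum ClientRequest {
    #[serde(rename = "initialize")]
    InitializeRequest(serde_json::Value),
    #[serde(rename = "ping")]
    PingRequest(serde_json::Value),
}

fn main() {
    let req = ClientRequest::InitializeRequest(serde_json::json!({
        "protocolVersion": "2025-06-18"
    }));
    // e.g. {"method":"initialize","params":{"protocolVersion":"2025-06-18"}}
    println!("{}", serde_json::to_string(&req).unwrap());
}
```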
@@ -614,7 +556,7 @@ def map_type(
if type_prop == "string":
if const_prop := typedef.get("const", None):
assert isinstance(const_prop, str)
return f'&\'static str = "{const_prop}"'
return f'&\'static str = "{const_prop}"'
else:
return "String"
elif type_prop == "integer":
@@ -690,7 +632,7 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:
serde_annotations.append('skip_serializing_if = "Option::is_none"')
if serde_annotations:
serde_str = f"#[serde({', '.join(serde_annotations)})]"
serde_str = f'#[serde({", ".join(serde_annotations)})]'
else:
serde_str = None
return RustProp(prop_name, serde_str)
@@ -698,7 +640,9 @@ def rust_prop_name(name: str, is_optional: bool) -> RustProp:
def to_snake_case(name: str) -> str:
"""Convert a camelCase or PascalCase name to snake_case."""
snake_case = name[0].lower() + "".join("_" + c.lower() if c.isupper() else c for c in name[1:])
snake_case = name[0].lower() + "".join(
"_" + c.lower() if c.isupper() else c for c in name[1:]
)
if snake_case != name:
return snake_case
else:
@@ -734,9 +678,5 @@ def emit_doc_comment(text: str | None, out: list[str]) -> None:
out.append(f"/// {line.rstrip()}\n")
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
sys.exit(main())

View File

@@ -31,8 +31,6 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
codex_protocol::mcp_protocol::SendUserTurnResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::InterruptConversationResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GitDiffToRemoteResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginApiKeyParams::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginApiKeyResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LoginChatGptResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::CancelLoginChatGptResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::LogoutChatGptResponse::export_all_to(out_dir)?;

View File

@@ -126,11 +126,6 @@ pub enum ClientRequest {
request_id: RequestId,
params: GitDiffToRemoteParams,
},
LoginApiKey {
#[serde(rename = "id")]
request_id: RequestId,
params: LoginApiKeyParams,
},
LoginChatGpt {
#[serde(rename = "id")]
request_id: RequestId,
@@ -293,16 +288,6 @@ pub struct ArchiveConversationResponse {}
#[serde(rename_all = "camelCase")]
pub struct RemoveConversationSubscriptionResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyParams {
pub api_key: String,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginApiKeyResponse {}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct LoginChatGptResponse {
@@ -382,14 +367,9 @@ pub struct ExecArbitraryCommandResponse {
pub struct GetAuthStatusResponse {
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_method: Option<AuthMode>,
pub preferred_auth_method: AuthMode,
#[serde(skip_serializing_if = "Option::is_none")]
pub auth_token: Option<String>,
// Indicates that a valid auth method is required to use the server.
// This can be false when using a custom provider configured with
// requires_openai_auth == false.
#[serde(skip_serializing_if = "Option::is_none")]
pub requires_openai_auth: Option<bool>,
}
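Taken together with `rename_all = "camelCase"`, the `skip_serializing_if` fields mean a response can legally omit `authToken` and `requiresOpenaiAuth`. A minimal sketch of the resulting wire JSON (assuming serde/serde_json; the simplified `AuthMode` here is a stand-in, and the real enum's variant serialization may differ):

```rust
use serde::Serialize;

#[derive(Serialize, Clone, Copy)]
#[serde(rename_all = "camelCase")]
enum AuthMode { ApiKey, ChatGpt } // simplified stand-in for the real enum

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct GetAuthStatusResponse {
    #[serde(skip_serializing_if = "Option::is_none")]
    auth_method: Option<AuthMode>,
    preferred_auth_method: AuthMode,
    #[serde(skip_serializing_if = "Option::is_none")]
    auth_token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    requires_openai_auth: Option<bool>,
}

fn main() {
    let status = GetAuthStatusResponse {
        auth_method: Some(AuthMode::ApiKey),
        preferred_auth_method: AuthMode::ChatGpt,
        auth_token: None,           // omitted unless explicitly requested
        requires_openai_auth: None, // omitted when unspecified
    };
    // Prints: {"authMethod":"apiKey","preferredAuthMethod":"chatGpt"}
    println!("{}", serde_json::to_string(&status).unwrap());
}
```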
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]

View File

@@ -308,7 +308,7 @@ async fn run_ratatui_app(
..
} = cli;
let auth_manager = AuthManager::shared(config.codex_home.clone());
let auth_manager = AuthManager::shared(config.codex_home.clone(), config.preferred_auth_method);
let login_status = get_login_status(&config);
let should_show_onboarding =
should_show_onboarding(login_status, &config, should_show_trust_screen);
@@ -392,7 +392,7 @@ fn get_login_status(config: &Config) -> LoginStatus {
// Reading the OpenAI API key is an async operation because it may need
// to refresh the token. Block on it.
let codex_home = config.codex_home.clone();
match CodexAuth::from_codex_home(&codex_home) {
match CodexAuth::from_codex_home(&codex_home, config.preferred_auth_method) {
Ok(Some(auth)) => LoginStatus::AuthMode(auth.mode),
Ok(None) => LoginStatus::NotAuthenticated,
Err(err) => {
@@ -460,28 +460,60 @@ fn should_show_login_screen(login_status: LoginStatus, config: &Config) -> bool
return false;
}
login_status == LoginStatus::NotAuthenticated
match login_status {
LoginStatus::NotAuthenticated => true,
LoginStatus::AuthMode(method) => method != config.preferred_auth_method,
}
}
#[cfg(test)]
mod tests {
use super::*;
fn make_config() -> Config {
Config::load_from_base_config_with_overrides(
fn make_config(preferred: AuthMode) -> Config {
let mut cfg = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
std::env::temp_dir(),
)
.expect("load default config")
.expect("load default config");
cfg.preferred_auth_method = preferred;
cfg
}
#[test]
fn shows_login_when_not_authenticated() {
let cfg = make_config();
let cfg = make_config(AuthMode::ChatGPT);
assert!(should_show_login_screen(
LoginStatus::NotAuthenticated,
&cfg
));
}
#[test]
fn shows_login_when_api_key_but_prefers_chatgpt() {
let cfg = make_config(AuthMode::ChatGPT);
assert!(should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ApiKey),
&cfg
))
}
#[test]
fn hides_login_when_api_key_and_prefers_api_key() {
let cfg = make_config(AuthMode::ApiKey);
assert!(!should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ApiKey),
&cfg
))
}
#[test]
fn hides_login_when_chatgpt_and_prefers_chatgpt() {
let cfg = make_config(AuthMode::ChatGPT);
assert!(!should_show_login_screen(
LoginStatus::AuthMode(AuthMode::ChatGPT),
&cfg
))
}
}

View File

@@ -2,17 +2,12 @@
use codex_core::AuthManager;
use codex_core::auth::CLIENT_ID;
use codex_core::auth::login_with_api_key;
use codex_core::auth::read_openai_api_key_from_env;
use codex_login::ServerOptions;
use codex_login::ShutdownHandle;
use codex_login::run_login_server;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyModifiers;
use ratatui::buffer::Buffer;
use ratatui::layout::Constraint;
use ratatui::layout::Layout;
use ratatui::layout::Rect;
use ratatui::prelude::Widget;
use ratatui::style::Color;
@@ -20,9 +15,6 @@ use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::style::Stylize;
use ratatui::text::Line;
use ratatui::widgets::Block;
use ratatui::widgets::BorderType;
use ratatui::widgets::Borders;
use ratatui::widgets::Paragraph;
use ratatui::widgets::WidgetRef;
use ratatui::widgets::Wrap;
@@ -46,14 +38,8 @@ pub(crate) enum SignInState {
ChatGptContinueInBrowser(ContinueInBrowserState),
ChatGptSuccessMessage,
ChatGptSuccess,
ApiKeyEntry(ApiKeyInputState),
ApiKeyConfigured,
}
#[derive(Clone, Default)]
pub(crate) struct ApiKeyInputState {
value: String,
prepopulated_from_env: bool,
EnvVarMissing,
EnvVarFound,
}
#[derive(Clone)]
@@ -73,10 +59,6 @@ impl Drop for ContinueInBrowserState {
impl KeyboardHandler for AuthModeWidget {
fn handle_key_event(&mut self, key_event: KeyEvent) {
if self.handle_api_key_entry_key_event(&key_event) {
return;
}
match key_event.code {
KeyCode::Up | KeyCode::Char('k') => {
self.highlighted_mode = AuthMode::ChatGPT;
@@ -87,7 +69,7 @@ impl KeyboardHandler for AuthModeWidget {
KeyCode::Char('1') => {
self.start_chatgpt_login();
}
KeyCode::Char('2') => self.start_api_key_entry(),
KeyCode::Char('2') => self.verify_api_key(),
KeyCode::Enter => {
let sign_in_state = { (*self.sign_in_state.read().unwrap()).clone() };
match sign_in_state {
@@ -96,9 +78,12 @@ impl KeyboardHandler for AuthModeWidget {
self.start_chatgpt_login();
}
AuthMode::ApiKey => {
self.start_api_key_entry();
self.verify_api_key();
}
},
SignInState::EnvVarMissing => {
*self.sign_in_state.write().unwrap() = SignInState::PickMode;
}
SignInState::ChatGptSuccessMessage => {
*self.sign_in_state.write().unwrap() = SignInState::ChatGptSuccess;
}
@@ -116,10 +101,6 @@ impl KeyboardHandler for AuthModeWidget {
_ => {}
}
}
fn handle_paste(&mut self, pasted: String) {
let _ = self.handle_api_key_entry_paste(pasted);
}
}
#[derive(Clone)]
@@ -130,6 +111,7 @@ pub(crate) struct AuthModeWidget {
pub sign_in_state: Arc<RwLock<SignInState>>,
pub codex_home: PathBuf,
pub login_status: LoginStatus,
pub preferred_auth_method: AuthMode,
pub auth_manager: Arc<AuthManager>,
}
@@ -147,6 +129,24 @@ impl AuthModeWidget {
"".into(),
];
// If the user is already authenticated but the method differs from their
// preferred auth method, show a brief explanation.
if let LoginStatus::AuthMode(current) = self.login_status
&& current != self.preferred_auth_method
{
let to_label = |mode: AuthMode| match mode {
AuthMode::ApiKey => "API key",
AuthMode::ChatGPT => "ChatGPT",
};
let msg = format!(
" Youre currently using {} while your preferred method is {}.",
to_label(current),
to_label(self.preferred_auth_method)
);
lines.push(msg.into());
lines.push("".into());
}
let create_mode_item = |idx: usize,
selected_mode: AuthMode,
text: &str,
@@ -175,17 +175,29 @@ impl AuthModeWidget {
vec![line1, line2]
};
let chatgpt_label = if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ChatGPT))
{
"Continue using ChatGPT"
} else {
"Sign in with ChatGPT"
};
lines.extend(create_mode_item(
0,
AuthMode::ChatGPT,
"Sign in with ChatGPT",
chatgpt_label,
"Usage included with Plus, Pro, and Team plans",
));
let api_key_label = if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ApiKey))
{
"Continue using API key"
} else {
"Provide your own API key"
};
lines.extend(create_mode_item(
1,
AuthMode::ApiKey,
"Provide your own API key",
api_key_label,
"Pay for what you use",
));
lines.push("".into());
@@ -270,213 +282,26 @@ impl AuthModeWidget {
.render(area, buf);
}
fn render_api_key_configured(&self, area: Rect, buf: &mut Buffer) {
let lines = vec![
"✓ API key configured".fg(Color::Green).into(),
"".into(),
" Codex will use usage-based billing with your API key.".into(),
];
fn render_env_var_found(&self, area: Rect, buf: &mut Buffer) {
let lines = vec!["✓ Using OPENAI_API_KEY".fg(Color::Green).into()];
Paragraph::new(lines)
.wrap(Wrap { trim: false })
.render(area, buf);
}
fn render_api_key_entry(&self, area: Rect, buf: &mut Buffer, state: &ApiKeyInputState) {
let [intro_area, input_area, footer_area] = Layout::vertical([
Constraint::Min(4),
Constraint::Length(3),
Constraint::Min(2),
])
.areas(area);
let mut intro_lines: Vec<Line> = vec![
Line::from(vec![
"> ".into(),
"Use your own OpenAI API key for usage-based billing".bold(),
]),
"".into(),
" Paste or type your API key below. It will be stored locally in auth.json.".into(),
fn render_env_var_missing(&self, area: Rect, buf: &mut Buffer) {
let lines = vec![
" To use Codex with the OpenAI API, set OPENAI_API_KEY in your environment"
.fg(Color::Cyan)
.into(),
"".into(),
" Press Enter to return".dim().into(),
];
if state.prepopulated_from_env {
intro_lines.push(" Detected OPENAI_API_KEY environment variable.".into());
intro_lines.push(
" Paste a different key if you prefer to use another account."
.dim()
.into(),
);
intro_lines.push("".into());
}
Paragraph::new(intro_lines)
Paragraph::new(lines)
.wrap(Wrap { trim: false })
.render(intro_area, buf);
let content_line: Line = if state.value.is_empty() {
vec!["Paste or type your API key".dim()].into()
} else {
Line::from(state.value.clone())
};
Paragraph::new(content_line)
.wrap(Wrap { trim: false })
.block(
Block::default()
.title("API key")
.borders(Borders::ALL)
.border_type(BorderType::Rounded)
.border_style(Style::default().fg(Color::Cyan)),
)
.render(input_area, buf);
let mut footer_lines: Vec<Line> = vec![
" Press Enter to save".dim().into(),
" Press Esc to go back".dim().into(),
];
if let Some(error) = &self.error {
footer_lines.push("".into());
footer_lines.push(error.as_str().red().into());
}
Paragraph::new(footer_lines)
.wrap(Wrap { trim: false })
.render(footer_area, buf);
}
fn handle_api_key_entry_key_event(&mut self, key_event: &KeyEvent) -> bool {
let mut should_save: Option<String> = None;
let mut should_request_frame = false;
{
let mut guard = self.sign_in_state.write().unwrap();
if let SignInState::ApiKeyEntry(state) = &mut *guard {
match key_event.code {
KeyCode::Esc => {
*guard = SignInState::PickMode;
self.error = None;
should_request_frame = true;
}
KeyCode::Enter => {
let trimmed = state.value.trim().to_string();
if trimmed.is_empty() {
self.error = Some("API key cannot be empty".to_string());
should_request_frame = true;
} else {
should_save = Some(trimmed);
}
}
KeyCode::Backspace => {
if state.prepopulated_from_env {
state.value.clear();
state.prepopulated_from_env = false;
} else {
state.value.pop();
}
self.error = None;
should_request_frame = true;
}
KeyCode::Char(c)
if !key_event.modifiers.contains(KeyModifiers::CONTROL)
&& !key_event.modifiers.contains(KeyModifiers::ALT) =>
{
if state.prepopulated_from_env {
state.value.clear();
state.prepopulated_from_env = false;
}
state.value.push(c);
self.error = None;
should_request_frame = true;
}
_ => {}
}
// handled; let guard drop before potential save
} else {
return false;
}
}
if let Some(api_key) = should_save {
self.save_api_key(api_key);
} else if should_request_frame {
self.request_frame.schedule_frame();
}
true
}
fn handle_api_key_entry_paste(&mut self, pasted: String) -> bool {
let trimmed = pasted.trim();
if trimmed.is_empty() {
return false;
}
let mut guard = self.sign_in_state.write().unwrap();
if let SignInState::ApiKeyEntry(state) = &mut *guard {
if state.prepopulated_from_env {
state.value = trimmed.to_string();
state.prepopulated_from_env = false;
} else {
state.value.push_str(trimmed);
}
self.error = None;
} else {
return false;
}
drop(guard);
self.request_frame.schedule_frame();
true
}
fn start_api_key_entry(&mut self) {
self.error = None;
let prefill_from_env = read_openai_api_key_from_env();
let mut guard = self.sign_in_state.write().unwrap();
match &mut *guard {
SignInState::ApiKeyEntry(state) => {
if state.value.is_empty() {
if let Some(prefill) = prefill_from_env.clone() {
state.value = prefill;
state.prepopulated_from_env = true;
} else {
state.prepopulated_from_env = false;
}
}
}
_ => {
*guard = SignInState::ApiKeyEntry(ApiKeyInputState {
value: prefill_from_env.clone().unwrap_or_default(),
prepopulated_from_env: prefill_from_env.is_some(),
});
}
}
drop(guard);
self.request_frame.schedule_frame();
}
fn save_api_key(&mut self, api_key: String) {
match login_with_api_key(&self.codex_home, &api_key) {
Ok(()) => {
self.error = None;
self.login_status = LoginStatus::AuthMode(AuthMode::ApiKey);
self.auth_manager.reload();
*self.sign_in_state.write().unwrap() = SignInState::ApiKeyConfigured;
}
Err(err) => {
self.error = Some(format!("Failed to save API key: {err}"));
let mut guard = self.sign_in_state.write().unwrap();
if let SignInState::ApiKeyEntry(existing) = &mut *guard {
if existing.value.is_empty() {
existing.value.push_str(&api_key);
}
existing.prepopulated_from_env = false;
} else {
*guard = SignInState::ApiKeyEntry(ApiKeyInputState {
value: api_key,
prepopulated_from_env: false,
});
}
}
}
self.request_frame.schedule_frame();
.render(area, buf);
}
fn start_chatgpt_login(&mut self) {
@@ -529,6 +354,18 @@ impl AuthModeWidget {
}
}
}
/// TODO: Read/write from the correct hierarchy: config overrides + auth.json + OPENAI_API_KEY.
fn verify_api_key(&mut self) {
if matches!(self.login_status, LoginStatus::AuthMode(AuthMode::ApiKey)) {
// We already have an API key configured (e.g., from auth.json or env),
// so mark this step complete immediately.
*self.sign_in_state.write().unwrap() = SignInState::EnvVarFound;
} else {
*self.sign_in_state.write().unwrap() = SignInState::EnvVarMissing;
}
self.request_frame.schedule_frame();
}
}
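The "already configured" branch above covers keys loaded from `auth.json` or the environment. For the environment half, the check reduces to a lookup along these lines; this is a hedged sketch only, since the real helper is `codex_core::auth::read_openai_api_key_from_env` (removed from this widget's imports) and may normalize or validate differently:

```rust
use std::env;

// Minimal OPENAI_API_KEY lookup: treat unset, empty, or whitespace-only
// values as "missing" so the widget can fall through to EnvVarMissing.
fn read_openai_api_key_from_env() -> Option<String> {
    env::var("OPENAI_API_KEY")
        .ok()
        .map(|value| value.trim().to_string())
        .filter(|value| !value.is_empty())
}

fn main() {
    match read_openai_api_key_from_env() {
        Some(_) => println!("✓ Using OPENAI_API_KEY"),
        None => println!("Set OPENAI_API_KEY in your environment"),
    }
}
```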
impl StepStateProvider for AuthModeWidget {
@@ -536,10 +373,10 @@ impl StepStateProvider for AuthModeWidget {
let sign_in_state = self.sign_in_state.read().unwrap();
match &*sign_in_state {
SignInState::PickMode
| SignInState::ApiKeyEntry(_)
| SignInState::EnvVarMissing
| SignInState::ChatGptContinueInBrowser(_)
| SignInState::ChatGptSuccessMessage => StepState::InProgress,
SignInState::ChatGptSuccess | SignInState::ApiKeyConfigured => StepState::Complete,
SignInState::ChatGptSuccess | SignInState::EnvVarFound => StepState::Complete,
}
}
}
@@ -560,11 +397,11 @@ impl WidgetRef for AuthModeWidget {
SignInState::ChatGptSuccess => {
self.render_chatgpt_success(area, buf);
}
SignInState::ApiKeyEntry(state) => {
self.render_api_key_entry(area, buf, state);
SignInState::EnvVarMissing => {
self.render_env_var_missing(area, buf);
}
SignInState::ApiKeyConfigured => {
self.render_api_key_configured(area, buf);
SignInState::EnvVarFound => {
self.render_env_var_found(area, buf);
}
}
}

View File

@@ -34,7 +34,6 @@ enum Step {
pub(crate) trait KeyboardHandler {
fn handle_key_event(&mut self, key_event: KeyEvent);
fn handle_paste(&mut self, _pasted: String) {}
}
pub(crate) enum StepState {
@@ -70,6 +69,7 @@ impl OnboardingScreen {
auth_manager,
config,
} = args;
let preferred_auth_method = config.preferred_auth_method;
let cwd = config.cwd.clone();
let codex_home = config.codex_home.clone();
let mut steps: Vec<Step> = vec![Step::Welcome(WelcomeWidget {
@@ -84,6 +84,7 @@ impl OnboardingScreen {
codex_home: codex_home.clone(),
login_status,
auth_manager,
preferred_auth_method,
}))
}
let is_git_repo = get_git_repo_root(&cwd).is_some();
@@ -193,17 +194,6 @@ impl KeyboardHandler for OnboardingScreen {
};
self.request_frame.schedule_frame();
}
fn handle_paste(&mut self, pasted: String) {
if pasted.is_empty() {
return;
}
if let Some(active_step) = self.current_steps_mut().into_iter().last() {
active_step.handle_paste(pasted);
}
self.request_frame.schedule_frame();
}
}
impl WidgetRef for &OnboardingScreen {
@@ -273,14 +263,6 @@ impl KeyboardHandler for Step {
Step::TrustDirectory(widget) => widget.handle_key_event(key_event),
}
}
fn handle_paste(&mut self, pasted: String) {
match self {
Step::Welcome(_) => {}
Step::Auth(widget) => widget.handle_paste(pasted),
Step::TrustDirectory(widget) => widget.handle_paste(pasted),
}
}
}
impl StepStateProvider for Step {
@@ -330,14 +312,12 @@ pub(crate) async fn run_onboarding_app(
TuiEvent::Key(key_event) => {
onboarding_screen.handle_key_event(key_event);
}
TuiEvent::Paste(text) => {
onboarding_screen.handle_paste(text);
}
TuiEvent::Draw => {
let _ = tui.draw(u16::MAX, |frame| {
frame.render_widget_ref(&onboarding_screen, frame.area());
});
}
_ => {}
}
}
}

View File

@@ -8,7 +8,7 @@ Run Codex head-less in pipelines. Example GitHub Action step:
- name: Update changelog via Codex
run: |
npm install -g @openai/codex
codex login --api-key "${{ secrets.OPENAI_KEY }}"
export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}"
codex exec --full-auto "update CHANGELOG for next release"
```

View File

@@ -2,10 +2,10 @@
## Usage-based billing alternative: Use an OpenAI API key
If you prefer to pay-as-you-go, you can still authenticate with your OpenAI API key:
If you prefer to pay-as-you-go, you can still authenticate with your OpenAI API key by setting it as an environment variable:
```shell
codex login --api-key "your-api-key-here"
export OPENAI_API_KEY="your-api-key-here"
```
This key must, at minimum, have write access to the Responses API.
@@ -18,6 +18,36 @@ If you've used the Codex CLI before with usage-based billing via an API key and
2. Delete `~/.codex/auth.json` (on Windows: `C:\\Users\\USERNAME\\.codex\\auth.json`)
3. Run `codex login` again
## Forcing a specific auth method (advanced)
You can explicitly choose which authentication Codex should prefer when both are available.
- To always use your API key (even when ChatGPT auth exists), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "apikey"
```
Or override ad-hoc via CLI:
```bash
codex --config preferred_auth_method="apikey"
```
- To prefer ChatGPT auth (default), set:
```toml
# ~/.codex/config.toml
preferred_auth_method = "chatgpt"
```
Notes:
- When `preferred_auth_method = "apikey"` and an API key is available, the login screen is skipped (the selection logic is sketched after these notes).
- When `preferred_auth_method = "chatgpt"` (default), Codex prefers ChatGPT auth if present; if only an API key is present, it will use the API key. Certain account types may also require API-key mode.
- To check which auth method is being used during a session, use the `/status` command in the TUI.
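A compact sketch of the preference check the first two notes describe, modeled on the `should_show_login_screen` change earlier in this diff (type names simplified):

```rust
#[derive(Clone, Copy, PartialEq)]
enum AuthMode { ApiKey, ChatGpt }

#[derive(Clone, Copy, PartialEq)]
enum LoginStatus { NotAuthenticated, AuthMode(AuthMode) }

// Show the login screen when unauthenticated, or when the stored auth
// method differs from the configured preference.
fn should_show_login_screen(status: LoginStatus, preferred: AuthMode) -> bool {
    match status {
        LoginStatus::NotAuthenticated => true,
        LoginStatus::AuthMode(method) => method != preferred,
    }
}

fn main() {
    // preferred_auth_method = "apikey" with an API key on disk: skip login.
    assert!(!should_show_login_screen(
        LoginStatus::AuthMode(AuthMode::ApiKey),
        AuthMode::ApiKey,
    ));
    // API key on disk but ChatGPT preferred: prompt again.
    assert!(should_show_login_screen(
        LoginStatus::AuthMode(AuthMode::ApiKey),
        AuthMode::ChatGpt,
    ));
}
```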
## Connecting on a "Headless" Machine
Today, the login process entails running a server on `localhost:1455`. If you are on a "headless" server, such as a Docker container, or are `ssh`'d into a remote machine, loading `localhost:1455` in the browser on your local machine will not automatically connect to the webserver running on the _headless_ machine, so you must use one of the following workarounds:

View File

@@ -612,4 +612,5 @@ Options that are specific to the TUI.
| `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. |
| `responses_originator_header_internal_override` | string | Override `originator` header value. |
| `projects.<path>.trust_level` | string | Mark project/worktree as trusted (only `"trusted"` is recognized). |
| `preferred_auth_method` | `chatgpt` \| `apikey` | Select default auth method (default: `chatgpt`). |
| `tools.web_search` | boolean | Enable web search tool (alias: `web_search_request`) (default: false). |