Compare commits


1 Commit

Author SHA1 Message Date
rakesh 782a3005c1 Introduce device code login for codex cli 2025-09-11 16:49:45 -07:00
32 changed files with 281 additions and 732 deletions

View File

@@ -2,10 +2,7 @@
<p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.
</br>
</br>If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a>
</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p>
<p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer.</br>If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, see <a href="https://chatgpt.com/codex">chatgpt.com/codex</a>.</p>
<p align="center">
<img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" />

View File

@@ -55,6 +55,17 @@ pub async fn run_login_with_api_key(
}
}
/// Login using the OAuth device code flow.
///
/// Currently not implemented; exits with a clear message.
pub async fn run_login_with_device_code(cli_config_overrides: CliConfigOverrides) -> ! {
// Parse and load config for consistency with other login commands.
let _config = load_config_or_exit(cli_config_overrides);
eprintln!("Device code login is not supported yet.");
std::process::exit(2);
}
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);

View File

@@ -10,6 +10,7 @@ use codex_cli::SeatbeltCommand;
use codex_cli::login::run_login_status;
use codex_cli::login::run_login_with_api_key;
use codex_cli::login::run_login_with_chatgpt;
use codex_cli::login::run_login_with_device_code;
use codex_cli::login::run_logout;
use codex_cli::proto;
use codex_common::CliConfigOverrides;
@@ -108,6 +109,10 @@ struct LoginCommand {
#[arg(long = "api-key", value_name = "API_KEY")]
api_key: Option<String>,
/// Use device code flow (not yet supported)
#[arg(long = "use-device-code")]
use_device_code: bool,
#[command(subcommand)]
action: Option<LoginSubcommand>,
}
@@ -168,7 +173,9 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
run_login_status(login_cli.config_overrides).await;
}
None => {
if let Some(api_key) = login_cli.api_key {
if login_cli.use_device_code {
run_login_with_device_code(login_cli.config_overrides).await;
} else if let Some(api_key) = login_cli.api_key {
run_login_with_api_key(login_cli.config_overrides, api_key).await;
} else {
run_login_with_chatgpt(login_cli.config_overrides).await;
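The new flag takes precedence over --api-key, which in turn takes precedence over the default ChatGPT login. A minimal standalone sketch of that dispatch order (the enum and function names below are illustrative, not items from the repo):

// Sketch of the login dispatch wired up above: --use-device-code wins over
// --api-key, which wins over the default ChatGPT flow.
enum LoginFlow {
    DeviceCode,
    ApiKey(String),
    ChatGpt,
}

fn choose_login_flow(use_device_code: bool, api_key: Option<String>) -> LoginFlow {
    if use_device_code {
        LoginFlow::DeviceCode
    } else if let Some(key) = api_key {
        LoginFlow::ApiKey(key)
    } else {
        LoginFlow::ChatGpt
    }
}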

View File

@@ -2,7 +2,7 @@ use std::time::Duration;
use std::time::Instant;
/// Returns a string representing the elapsed time since `start_time` like
/// "1m 15s" or "1.50s".
/// "1m15s" or "1.50s".
pub fn format_elapsed(start_time: Instant) -> String {
format_duration(start_time.elapsed())
}
@@ -12,7 +12,7 @@ pub fn format_elapsed(start_time: Instant) -> String {
/// Formatting rules:
/// * < 1 s -> "{milli}ms"
/// * < 60 s -> "{sec:.2}s" (two decimal places)
/// * >= 60 s -> "{min}m {sec:02}s"
/// * >= 60 s -> "{min}m{sec:02}s"
pub fn format_duration(duration: Duration) -> String {
let millis = duration.as_millis() as i64;
format_elapsed_millis(millis)
@@ -26,7 +26,7 @@ fn format_elapsed_millis(millis: i64) -> String {
} else {
let minutes = millis / 60_000;
let seconds = (millis % 60_000) / 1000;
format!("{minutes}m {seconds:02}s")
format!("{minutes}m{seconds:02}s")
}
}
@@ -61,18 +61,12 @@ mod tests {
fn test_format_duration_minutes() {
// Durations ≥ 1 minute should be printed like "1m15s".
let dur = Duration::from_millis(75_000); // 1m15s
assert_eq!(format_duration(dur), "1m 15s");
assert_eq!(format_duration(dur), "1m15s");
let dur_exact = Duration::from_millis(60_000); // 1m0s
assert_eq!(format_duration(dur_exact), "1m 00s");
assert_eq!(format_duration(dur_exact), "1m00s");
let dur_long = Duration::from_millis(3_601_000);
assert_eq!(format_duration(dur_long), "60m 01s");
}
#[test]
fn test_format_duration_one_hour_has_space() {
let dur_hour = Duration::from_millis(3_600_000);
assert_eq!(format_duration(dur_hour), "60m 00s");
assert_eq!(format_duration(dur_long), "60m01s");
}
}
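The net effect of this change is that the space between minutes and seconds is dropped everywhere. A self-contained sketch of the resulting rules, under the thresholds documented above (names here are illustrative, not the crate's own):

// Sketch of the new space-free formatting: ms under 1s, two decimals under
// 60s, and "1m15s"-style output from one minute upward.
use std::time::Duration;

fn format_duration_sketch(duration: Duration) -> String {
    let millis = duration.as_millis();
    if millis < 1000 {
        format!("{millis}ms")
    } else if millis < 60_000 {
        format!("{:.2}s", millis as f64 / 1000.0)
    } else {
        let minutes = millis / 60_000;
        let seconds = (millis % 60_000) / 1000;
        format!("{minutes}m{seconds:02}s")
    }
}

fn main() {
    assert_eq!(format_duration_sketch(Duration::from_millis(250)), "250ms");
    assert_eq!(format_duration_sketch(Duration::from_millis(1_500)), "1.50s");
    assert_eq!(format_duration_sketch(Duration::from_millis(75_000)), "1m15s");
    assert_eq!(format_duration_sketch(Duration::from_millis(3_601_000)), "60m01s");
}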

View File

@@ -240,10 +240,10 @@ impl ModelClient {
let res = req_builder.send().await;
if let Ok(resp) = &res {
trace!(
"Response status: {}, cf-ray: {}",
"Response status: {}, request-id: {}",
resp.status(),
resp.headers()
.get("cf-ray")
.get("x-request-id")
.map(|v| v.to_str().unwrap_or_default())
.unwrap_or_default()
);

View File

@@ -9,6 +9,9 @@ use std::sync::atomic::AtomicU64;
use std::time::Duration;
use crate::AuthManager;
use crate::config_edit::CONFIG_KEY_EFFORT;
use crate::config_edit::CONFIG_KEY_MODEL;
use crate::config_edit::persist_non_null_overrides;
use crate::event_mapping::map_response_item_to_event_messages;
use async_channel::Receiver;
use async_channel::Sender;
@@ -462,6 +465,7 @@ impl Session {
tools_config: ToolsConfig::new(&ToolsConfigParams {
model_family: &config.model_family,
approval_policy,
sandbox_policy: sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -501,7 +505,6 @@ impl Session {
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model,
reasoning_effort: model_reasoning_effort,
history_log_id,
history_entry_count,
initial_messages,
@@ -685,6 +688,12 @@ impl Session {
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
items.push(UserInstructions::new(user_instructions.to_string()).into());
}
items.push(ResponseItem::from(EnvironmentContext::new(
Some(turn_context.cwd.clone()),
Some(turn_context.approval_policy),
Some(turn_context.sandbox_policy.clone()),
Some(self.user_shell.clone()),
)));
items
}
@@ -1136,6 +1145,7 @@ async fn submission_loop(
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &effective_family,
approval_policy: new_approval_policy,
sandbox_policy: new_sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -1157,18 +1167,42 @@ async fn submission_loop(
// Install the new persistent context for subsequent tasks/turns.
turn_context = Arc::new(new_turn_context);
// Optionally persist changes to model / effort
let effort_str = effort.map(|_| effective_effort.to_string());
if let Err(e) = persist_non_null_overrides(
&config.codex_home,
config.active_profile.as_deref(),
&[
(&[CONFIG_KEY_MODEL], model.as_deref()),
(&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
],
)
.await
{
warn!("failed to persist overrides: {e:#}");
}
if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() {
sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
cwd,
approval_policy,
sandbox_policy,
// Shell is not configurable from turn to turn
None,
))])
.await;
}
}
Op::UserInput { items } => {
submit_user_input(
turn_context.cwd.clone(),
turn_context.approval_policy,
turn_context.sandbox_policy.clone(),
&sess,
&turn_context,
sub.id.clone(),
items,
)
.await;
// attempt to inject input into current task
if let Err(items) = sess.inject_input(items) {
// no current task, spawn a new one
let task =
AgentTask::spawn(sess.clone(), Arc::clone(&turn_context), sub.id, items);
sess.set_task(task);
}
}
Op::UserTurn {
items,
@@ -1213,6 +1247,7 @@ async fn submission_loop(
tools_config: ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy,
sandbox_policy: sandbox_policy.clone(),
include_plan_tool: config.include_plan_tool,
include_apply_patch_tool: config.include_apply_patch_tool,
include_web_search_request: config.tools_web_search_request,
@@ -1229,16 +1264,11 @@ async fn submission_loop(
shell_environment_policy: turn_context.shell_environment_policy.clone(),
cwd,
};
submit_user_input(
fresh_turn_context.cwd.clone(),
fresh_turn_context.approval_policy,
fresh_turn_context.sandbox_policy.clone(),
&sess,
&Arc::new(fresh_turn_context),
sub.id.clone(),
items,
)
.await;
// TODO: record the new environment context in the conversation history
// no current task, spawn a new one with the per-turn context
let task =
AgentTask::spawn(sess.clone(), Arc::new(fresh_turn_context), sub.id, items);
sess.set_task(task);
}
}
Op::ExecApproval { id, decision } => match decision {
@@ -2813,29 +2843,6 @@ async fn handle_sandbox_error(
}
}
async fn submit_user_input(
cwd: PathBuf,
approval_policy: AskForApproval,
sandbox_policy: SandboxPolicy,
sess: &Arc<Session>,
turn_context: &Arc<TurnContext>,
sub_id: String,
items: Vec<InputItem>,
) {
sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new(
Some(cwd),
Some(approval_policy),
Some(sandbox_policy),
Some(sess.user_shell.clone()),
))])
.await;
if let Err(items) = sess.inject_input(items) {
// no current task, spawn a new one
let task = AgentTask::spawn(Arc::clone(sess), Arc::clone(turn_context), sub_id, items);
sess.set_task(task);
}
}
fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String {
let ExecToolCallOutput {
aggregated_output, ..

View File

@@ -495,7 +495,7 @@ fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) {
}
/// Base config deserialized from ~/.codex/config.toml.
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ConfigToml {
/// Optional override of model selection.
pub model: Option<String>,
@@ -627,7 +627,7 @@ pub struct ProjectConfig {
pub trust_level: Option<String>,
}
#[derive(Deserialize, Debug, Clone, Default, PartialEq)]
#[derive(Deserialize, Debug, Clone, Default)]
pub struct ToolsToml {
#[serde(default, alias = "web_search_request")]
pub web_search: Option<bool>,

View File

@@ -8,6 +8,7 @@ use std::collections::HashMap;
use crate::model_family::ModelFamily;
use crate::plan_tool::PLAN_TOOL;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
@@ -57,7 +58,7 @@ pub(crate) enum OpenAiTool {
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
DefaultShell,
ShellWithRequest,
ShellWithRequest { sandbox_policy: SandboxPolicy },
LocalShell,
StreamableShell,
}
@@ -75,6 +76,7 @@ pub(crate) struct ToolsConfig {
pub(crate) struct ToolsConfigParams<'a> {
pub(crate) model_family: &'a ModelFamily,
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,
pub(crate) include_plan_tool: bool,
pub(crate) include_apply_patch_tool: bool,
pub(crate) include_web_search_request: bool,
@@ -88,6 +90,7 @@ impl ToolsConfig {
let ToolsConfigParams {
model_family,
approval_policy,
sandbox_policy,
include_plan_tool,
include_apply_patch_tool,
include_web_search_request,
@@ -103,7 +106,9 @@ impl ToolsConfig {
ConfigShellToolType::DefaultShell
};
if matches!(approval_policy, AskForApproval::OnRequest) && !use_streamable_shell_tool {
shell_type = ConfigShellToolType::ShellWithRequest;
shell_type = ConfigShellToolType::ShellWithRequest {
sandbox_policy: sandbox_policy.clone(),
}
}
let apply_patch_tool_type = match model_family.apply_patch_tool_type {
@@ -246,9 +251,7 @@ fn create_unified_exec_tool() -> OpenAiTool {
})
}
const SHELL_TOOL_DESCRIPTION: &str = r#"Runs a shell command and returns its output"#;
fn create_shell_tool_for_request() -> OpenAiTool {
fn create_shell_tool_for_sandbox(sandbox_policy: &SandboxPolicy) -> OpenAiTool {
let mut properties = BTreeMap::new();
properties.insert(
"command".to_string(),
@@ -260,29 +263,79 @@ fn create_shell_tool_for_request() -> OpenAiTool {
properties.insert(
"workdir".to_string(),
JsonSchema::String {
description: Some("Working directory to execute the command in.".to_string()),
description: Some("The working directory to execute the command in".to_string()),
},
);
properties.insert(
"timeout_ms".to_string(),
JsonSchema::Number {
description: Some("Timeout for the command in milliseconds.".to_string()),
},
);
properties.insert(
"with_escalated_permissions".to_string(),
JsonSchema::Boolean {
description: Some("Request escalated permissions, only for when a command would otherwise be blocked by the sandbox.".to_string()),
},
);
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Required if and only if with_escalated_permissions == true. One sentence explaining why escalation is needed (e.g., write outside CWD, network fetch, git commit).".to_string()),
description: Some("The timeout for the command in milliseconds".to_string()),
},
);
let description = SHELL_TOOL_DESCRIPTION.to_string();
if matches!(sandbox_policy, SandboxPolicy::WorkspaceWrite { .. }) {
properties.insert(
"with_escalated_permissions".to_string(),
JsonSchema::Boolean {
description: Some("Whether to request escalated permissions. Set to true if command needs to be run without sandbox restrictions".to_string()),
},
);
properties.insert(
"justification".to_string(),
JsonSchema::String {
description: Some("Only set if with_escalated_permissions is true. 1-sentence explanation of why we want to run this command.".to_string()),
},
);
}
let description = match sandbox_policy {
SandboxPolicy::WorkspaceWrite {
network_access,
..
} => {
let network_line = if !network_access {
"\n - Commands that require network access"
} else {
""
};
format!(
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories){network_line}
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#,
)
}
SandboxPolicy::DangerFullAccess => {
"Runs a shell command and returns its output.".to_string()
}
SandboxPolicy::ReadOnly => {
r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#.to_string()
}
};
OpenAiTool::Function(ResponsesApiTool {
name: "shell".to_string(),
@@ -295,6 +348,7 @@ fn create_shell_tool_for_request() -> OpenAiTool {
},
})
}
fn create_view_image_tool() -> OpenAiTool {
// Support only local filesystem path.
let mut properties = BTreeMap::new();
@@ -535,8 +589,8 @@ pub(crate) fn get_openai_tools(
ConfigShellToolType::DefaultShell => {
tools.push(create_shell_tool());
}
ConfigShellToolType::ShellWithRequest => {
tools.push(create_shell_tool_for_request());
ConfigShellToolType::ShellWithRequest { sandbox_policy } => {
tools.push(create_shell_tool_for_sandbox(sandbox_policy));
}
ConfigShellToolType::LocalShell => {
tools.push(OpenAiTool::LocalShell {});
@@ -632,6 +686,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -653,6 +708,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: true,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -674,6 +730,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -779,6 +836,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: false,
@@ -856,6 +914,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -918,6 +977,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -975,6 +1035,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1035,6 +1096,7 @@ mod tests {
let config = ToolsConfig::new(&ToolsConfigParams {
model_family: &model_family,
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::ReadOnly,
include_plan_tool: false,
include_apply_patch_tool: false,
include_web_search_request: true,
@@ -1088,7 +1150,13 @@ mod tests {
#[test]
fn test_shell_tool_for_sandbox_workspace_write() {
let tool = super::create_shell_tool_for_request();
let sandbox_policy = SandboxPolicy::WorkspaceWrite {
writable_roots: vec!["workspace".into()],
network_access: false,
exclude_tmpdir_env_var: false,
exclude_slash_tmp: false,
};
let tool = super::create_shell_tool_for_sandbox(&sandbox_policy);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1097,13 +1165,26 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = super::SHELL_TOOL_DESCRIPTION;
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands will require escalated privileges:
- Types of actions that require escalated privileges:
- Writing files other than those in the writable roots (see the environment context for the allowed directories)
- Commands that require network access
- Examples of commands that require escalated privileges:
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter."#;
assert_eq!(description, expected);
}
#[test]
fn test_shell_tool_for_sandbox_readonly() {
let tool = super::create_shell_tool_for_request();
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::ReadOnly);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1112,13 +1193,27 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = super::SHELL_TOOL_DESCRIPTION;
let expected = r#"
The shell tool is used to execute shell commands.
- When invoking the shell tool, your call will be running in a sandbox, and some shell commands (including apply_patch) will require escalated permissions:
- Types of actions that require escalated privileges:
- Writing files
- Applying patches
- Examples of commands that require escalated privileges:
- apply_patch
- git commit
- npm install or pnpm install
- cargo build
- cargo test
- When invoking a command that will require escalated privileges:
- Provide the with_escalated_permissions parameter with the boolean value true
- Include a short, 1 sentence explanation for why we need to run with_escalated_permissions in the justification parameter"#;
assert_eq!(description, expected);
}
#[test]
fn test_shell_tool_for_sandbox_danger_full_access() {
let tool = super::create_shell_tool_for_request();
let tool = super::create_shell_tool_for_sandbox(&SandboxPolicy::DangerFullAccess);
let OpenAiTool::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -1127,7 +1222,6 @@ mod tests {
};
assert_eq!(name, "shell");
let expected = super::SHELL_TOOL_DESCRIPTION;
assert_eq!(description, expected);
assert_eq!(description, "Runs a shell command and returns its output.");
}
}
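Taken together, the shell tool is now built from the active sandbox policy: the with_escalated_permissions and justification parameters are only added under workspace-write, and the description text varies per policy. A reduced sketch of that shape (the type and function names are illustrative only, not the crate's actual items):

// Illustrative reduction of the policy-dependent tool shape. Mirrors the diff:
// escalation parameters appear only for workspace-write, and the description
// is the short one-liner only under full access.
enum PolicySketch {
    ReadOnly,
    WorkspaceWrite { network_access: bool },
    DangerFullAccess,
}

fn shell_tool_shape(policy: &PolicySketch) -> (bool, String) {
    let has_escalation_params = matches!(policy, PolicySketch::WorkspaceWrite { .. });
    let description = match policy {
        PolicySketch::DangerFullAccess => {
            "Runs a shell command and returns its output.".to_string()
        }
        PolicySketch::ReadOnly => {
            "Sandboxed shell; writing files or applying patches requires escalation.".to_string()
        }
        PolicySketch::WorkspaceWrite { network_access } => format!(
            "Sandboxed shell; writes outside the writable roots{} require escalation.",
            if *network_access { "" } else { " and network access" }
        ),
    };
    (has_escalation_params, description)
}

fn main() {
    let (escalation, desc) = shell_tool_shape(&PolicySketch::ReadOnly);
    println!("escalation params exposed: {escalation}, description: {desc}");
}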

View File

@@ -26,7 +26,7 @@ const PROJECT_DOC_SEPARATOR: &str = "\n\n--- project-doc ---\n\n";
/// Combines `Config::instructions` and `AGENTS.md` (if present) into a single
/// string of instructions.
pub async fn get_user_instructions(config: &Config) -> Option<String> {
pub(crate) async fn get_user_instructions(config: &Config) -> Option<String> {
match read_project_docs(config).await {
Ok(Some(project_doc)) => match &config.user_instructions {
Some(original_instructions) => Some(format!(

View File

@@ -4,11 +4,9 @@ use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::WireApi;
use codex_core::built_in_model_providers;
use codex_core::project_doc::get_user_instructions;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::shell::default_user_shell;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
@@ -223,8 +221,6 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
};
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
let cwd = TempDir::new().unwrap();
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.experimental_resume = Some(session_path.clone());
// Also configure user instructions to ensure they are NOT delivered on resume.
@@ -263,29 +259,6 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
// Build expected environment context for this turn.
let shell = default_user_shell().await;
let shell_line = match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
};
let expected_env_text_turn = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
{}</environment_context>"#,
cwd.path().to_string_lossy(),
shell_line.as_str(),
);
let expected_env_msg_turn = json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_turn } ]
});
let expected_input = json!([
{
"type": "message",
@@ -297,14 +270,12 @@ async fn resume_includes_initial_messages_and_sends_prior_items() {
"role": "assistant",
"content": [{ "type": "output_text", "text": "resumed assistant message" }]
},
expected_env_msg_turn,
{
"type": "message",
"role": "user",
"content": [{ "type": "input_text", "text": "hello" }]
}
]);
assert_eq!(request_body["input"], expected_input);
}
@@ -867,7 +838,7 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
conversation: codex,
..
} = conversation_manager
.new_conversation(config.clone())
.new_conversation(config)
.await
.expect("create new conversation");
@@ -902,49 +873,34 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
let requests = server.received_requests().await.unwrap();
assert_eq!(requests.len(), 3, "expected 3 requests (one per turn)");
// Build expected environment context dynamically to avoid OS-dependent flakiness.
let user_instructions = get_user_instructions(&config).await;
let shell = default_user_shell().await;
let shell_line = match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
};
let expected_env_text = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
<sandbox_mode>read-only</sandbox_mode>
<network_access>restricted</network_access>
{}</environment_context>"#,
std::env::current_dir().unwrap().to_string_lossy(),
shell_line.as_str(),
);
let expected_env_msg = json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text } ]
});
// Wrap user instructions in the XML container to match the raw/ingest view
let expected_ui_text = format!(
"<user_instructions>\n\n{}\n\n</user_instructions>",
user_instructions.clone().unwrap()
);
let expected_ui_msg = json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_ui_text } ]
});
let expected_full = json!([
expected_ui_msg,
expected_env_msg.clone(),
{"type":"message","role":"user","content":[{"type":"input_text","text":"U1"}]},
{"type":"message","role":"assistant","content":[{"type":"output_text","text":"Hey there!\n"}]},
expected_env_msg.clone(),
{"type":"message","role":"user","content":[{"type":"input_text","text":"U2"}]},
{"type":"message","role":"assistant","content":[{"type":"output_text","text":"Hey there!\n"}]},
expected_env_msg,
{"type":"message","role":"user","content":[{"type":"input_text","text":"U3"}]}]);
// Replace full-array compare with tail-only raw JSON compare using a single hard-coded value.
let r3_tail_expected = json!([
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U1"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U2"}]
},
{
"type": "message",
"role": "assistant",
"content": [{"type":"output_text","text":"Hey there!\n"}]
},
{
"type": "message",
"role": "user",
"content": [{"type":"input_text","text":"U3"}]
}
]);
let r3_input_array = requests[2]
.body_json::<serde_json::Value>()
@@ -953,6 +909,12 @@ async fn history_dedupes_streamed_and_final_messages_across_turns() {
.and_then(|v| v.as_array())
.cloned()
.expect("r3 missing input array");
assert_eq!(json!(r3_input_array), expected_full);
// skipping earlier context and developer messages
let tail_len = r3_tail_expected.as_array().unwrap().len();
let actual_tail = &r3_input_array[r3_input_array.len() - tail_len..];
assert_eq!(
serde_json::Value::Array(actual_tail.to_vec()),
r3_tail_expected,
"request 3 tail mismatch",
);
}

View File

@@ -7,7 +7,6 @@ mod exec;
mod exec_stream_events;
mod fork_conversation;
mod live_cli;
mod model_overrides;
mod prompt_caching;
mod seatbelt;
mod stream_error_allows_next_turn;

View File

@@ -1,92 +0,0 @@
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_core::protocol_config_types::ReasoningEffort;
use core_test_support::load_default_config_for_test;
use core_test_support::wait_for_event;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
const CONFIG_TOML: &str = "config.toml";
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_persist_when_config_exists() {
let codex_home = TempDir::new().unwrap();
let config_path = codex_home.path().join(CONFIG_TOML);
let initial_contents = "model = \"gpt-4o\"\n";
tokio::fs::write(&config_path, initial_contents)
.await
.expect("seed config.toml");
let mut config = load_default_config_for_test(&codex_home);
config.model = "gpt-4o".to_string();
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create conversation")
.conversation;
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some("o3".to_string()),
effort: Some(ReasoningEffort::High),
summary: None,
})
.await
.expect("submit override");
codex.submit(Op::Shutdown).await.expect("request shutdown");
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
let contents = tokio::fs::read_to_string(&config_path)
.await
.expect("read config.toml after override");
assert_eq!(contents, initial_contents);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn override_turn_context_does_not_create_config_file() {
let codex_home = TempDir::new().unwrap();
let config_path = codex_home.path().join(CONFIG_TOML);
assert!(
!config_path.exists(),
"test setup should start without config"
);
let config = load_default_config_for_test(&codex_home);
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create conversation")
.conversation;
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
model: Some("o3".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: None,
})
.await
.expect("submit override");
codex.submit(Op::Shutdown).await.expect("request shutdown");
wait_for_event(&codex, |ev| matches!(ev, EventMsg::ShutdownComplete)).await;
assert!(
!config_path.exists(),
"override should not create config.toml"
);
}

View File

@@ -270,13 +270,8 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
assert_eq!(requests.len(), 2, "expected two POST requests");
let shell = default_user_shell().await;
let shell_line = match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
};
// Per-turn environment context includes the shell tag.
let expected_env_text_turn = format!(
let expected_env_text = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>on-request</approval_policy>
@@ -284,15 +279,18 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
<network_access>restricted</network_access>
{}</environment_context>"#,
cwd.path().to_string_lossy(),
shell_line.as_str(),
match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
}
);
let expected_ui_text =
"<user_instructions>\n\nbe consistent and helpful\n\n</user_instructions>";
let expected_env_msg_turn = serde_json::json!({
let expected_env_msg = serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_turn } ]
"content": [ { "type": "input_text", "text": expected_env_text } ]
});
let expected_ui_msg = serde_json::json!({
"type": "message",
@@ -306,29 +304,11 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
"content": [ { "type": "input_text", "text": "hello 1" } ]
});
let body1 = requests[0].body_json::<serde_json::Value>().unwrap();
let body1_input = body1["input"].as_array().unwrap();
assert_eq!(
body1["input"],
serde_json::json!([
expected_ui_msg,
expected_env_msg_turn,
expected_user_message_1
])
serde_json::json!([expected_ui_msg, expected_env_msg, expected_user_message_1])
);
let env_texts: Vec<&str> = body1_input
.iter()
.filter_map(|msg| {
msg.get("content")
.and_then(|content| content.as_array())
.and_then(|content| content.first())
.and_then(|item| item.get("text"))
.and_then(|text| text.as_str())
})
.filter(|text| text.starts_with("<environment_context>"))
.collect();
assert_eq!(env_texts, vec![expected_env_text_turn.as_str()]);
let expected_user_message_2 = serde_json::json!({
"type": "message",
"role": "user",
@@ -338,7 +318,7 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_env_msg_turn, expected_user_message_2].as_slice(),
[expected_user_message_2].as_slice(),
]
.concat()
);
@@ -443,28 +423,19 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() {
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let shell = default_user_shell().await;
let shell_line = match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
};
// After overriding the turn context, the environment context should be emitted again
// reflecting the new approval policy and sandbox settings. Omit cwd because it did
// not change.
let expected_env_text_2 = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
<writable_roots>
<root>{}</root>
</writable_roots>
{}</environment_context>"#,
cwd.path().to_string_lossy(),
writable.path().to_string_lossy(),
shell_line.as_str()
</environment_context>"#,
writable.path().to_string_lossy()
);
let expected_env_msg_2 = serde_json::json!({
"type": "message",
@@ -575,165 +546,12 @@ async fn per_turn_overrides_keep_cached_prefix_and_key_constant() {
"role": "user",
"content": [ { "type": "input_text", "text": "hello 2" } ]
});
let shell = default_user_shell().await;
let shell_line = match shell.name() {
Some(name) => format!(" <shell>{name}</shell>\n"),
None => String::new(),
};
let expected_env_text_2 = format!(
r#"<environment_context>
<cwd>{}</cwd>
<approval_policy>never</approval_policy>
<sandbox_mode>workspace-write</sandbox_mode>
<network_access>enabled</network_access>
<writable_roots>
<root>{}</root>
</writable_roots>
{}</environment_context>"#,
new_cwd.path().to_string_lossy(),
writable.path().to_string_lossy(),
shell_line.as_str()
);
let expected_env_msg_2 = serde_json::json!({
"type": "message",
"role": "user",
"content": [ { "type": "input_text", "text": expected_env_text_2 } ]
});
let expected_body2 = serde_json::json!(
[
body1["input"].as_array().unwrap().as_slice(),
[expected_env_msg_2, expected_user_message_2].as_slice(),
[expected_user_message_2].as_slice(),
]
.concat()
);
assert_eq!(body2["input"], expected_body2);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tools_stable_across_all_approval_policy_transitions() {
use pretty_assertions::assert_eq;
let server = MockServer::start().await;
let sse = sse_completed("resp");
let template = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream");
// Build all transitions FROM each to each other (exclude self transitions)
let policies = vec![
AskForApproval::UnlessTrusted,
AskForApproval::OnFailure,
AskForApproval::OnRequest,
AskForApproval::Never,
];
let mut transitions: Vec<(AskForApproval, AskForApproval)> = Vec::new();
for &from in &policies {
for &to in &policies {
if from != to {
transitions.push((from, to));
}
}
}
// Expect 2 POSTs per transition
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(template)
.expect((transitions.len() * 2) as u64)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let cwd = TempDir::new().unwrap();
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.cwd = cwd.path().to_path_buf();
config.model_provider = model_provider;
config.user_instructions = Some("be consistent and helpful".to_string());
// Keep tools stable and minimal
config.include_plan_tool = false;
config.include_apply_patch_tool = false;
config.tools_web_search_request = false;
config.use_experimental_unified_exec_tool = true; // policy-independent tool
let conversation_manager =
ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
let codex = conversation_manager
.new_conversation(config)
.await
.expect("create new conversation")
.conversation;
for (i, (from, to)) in transitions.iter().enumerate() {
// Ensure a known starting policy for this pair
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: Some(*from),
sandbox_policy: None,
model: None,
effort: None,
summary: None,
})
.await
.unwrap();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: format!("turn {i}-a"),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// Override to the target policy and send next turn
codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: Some(*to),
sandbox_policy: None,
model: None,
effort: None,
summary: None,
})
.await
.unwrap();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: format!("turn {i}-b"),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
}
// Verify tool arrays are identical across each pair of requests
let requests = server.received_requests().await.unwrap();
assert_eq!(
requests.len(),
transitions.len() * 2,
"expected 2 requests per transition"
);
for i in 0..transitions.len() {
let body_a = requests[2 * i].body_json::<serde_json::Value>().unwrap();
let body_b = requests[2 * i + 1]
.body_json::<serde_json::Value>()
.unwrap();
assert_eq!(
body_a["tools"], body_b["tools"],
"tools changed between requests for transition #{i}: {:?}",
transitions[i]
);
}
}

View File

@@ -520,7 +520,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
let SessionConfiguredEvent {
session_id: conversation_id,
model,
reasoning_effort: _,
history_log_id: _,
history_entry_count: _,
initial_messages: _,

View File

@@ -31,13 +31,6 @@ install:
rustup show active-toolchain
cargo fetch
# Run `cargo nextest` since it's faster than `cargo test`, though including
# --no-fail-fast is important to ensure all tests are run.
#
# Run `cargo install cargo-nextest` if you don't have it installed.
test:
cargo nextest run --no-fail-fast
# Run the MCP server
mcp-server-run *args:
cargo run -p codex-mcp-server -- "$@"

View File

@@ -18,9 +18,6 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use codex_core::config::load_config_as_toml;
use codex_core::config_edit::CONFIG_KEY_EFFORT;
use codex_core::config_edit::CONFIG_KEY_MODEL;
use codex_core::config_edit::persist_non_null_overrides;
use codex_core::default_client::get_codex_user_agent;
use codex_core::exec::ExecParams;
use codex_core::exec_env::create_env;
@@ -74,8 +71,6 @@ use codex_protocol::mcp_protocol::SendUserMessageResponse;
use codex_protocol::mcp_protocol::SendUserTurnParams;
use codex_protocol::mcp_protocol::SendUserTurnResponse;
use codex_protocol::mcp_protocol::ServerNotification;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use codex_protocol::mcp_protocol::SetDefaultModelResponse;
use codex_protocol::mcp_protocol::UserInfoResponse;
use codex_protocol::mcp_protocol::UserSavedConfig;
use codex_protocol::models::ContentItem;
@@ -197,9 +192,6 @@ impl CodexMessageProcessor {
ClientRequest::GetUserSavedConfig { request_id } => {
self.get_user_saved_config(request_id).await;
}
ClientRequest::SetDefaultModel { request_id, params } => {
self.set_default_model(request_id, params).await;
}
ClientRequest::GetUserAgent { request_id } => {
self.get_user_agent(request_id).await;
}
@@ -507,40 +499,6 @@ impl CodexMessageProcessor {
self.outgoing.send_response(request_id, response).await;
}
async fn set_default_model(&self, request_id: RequestId, params: SetDefaultModelParams) {
let SetDefaultModelParams {
model,
reasoning_effort,
} = params;
let effort_str = reasoning_effort.map(|effort| effort.to_string());
let overrides: [(&[&str], Option<&str>); 2] = [
(&[CONFIG_KEY_MODEL], model.as_deref()),
(&[CONFIG_KEY_EFFORT], effort_str.as_deref()),
];
match persist_non_null_overrides(
&self.config.codex_home,
self.config.active_profile.as_deref(),
&overrides,
)
.await
{
Ok(()) => {
let response = SetDefaultModelResponse {};
self.outgoing.send_response(request_id, response).await;
}
Err(err) => {
let error = JSONRPCErrorError {
code: INTERNAL_ERROR_CODE,
message: format!("failed to persist overrides: {err}"),
data: None,
};
self.outgoing.send_error(request_id, error).await;
}
}
}
async fn exec_one_off_command(&self, request_id: RequestId, params: ExecOneOffCommandParams) {
tracing::debug!("ExecOneOffCommand params: {params:?}");
@@ -635,7 +593,6 @@ impl CodexMessageProcessor {
let response = NewConversationResponse {
conversation_id,
model: session_configured.model,
reasoning_effort: session_configured.reasoning_effort,
rollout_path: session_configured.rollout_path,
};
self.outgoing.send_response(request_id, response).await;

View File

@@ -258,7 +258,6 @@ pub(crate) struct OutgoingError {
mod tests {
use codex_core::protocol::EventMsg;
use codex_core::protocol::SessionConfiguredEvent;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::mcp_protocol::ConversationId;
use codex_protocol::mcp_protocol::LoginChatGptCompleteNotification;
use pretty_assertions::assert_eq;
@@ -280,7 +279,6 @@ mod tests {
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "gpt-4o".to_string(),
reasoning_effort: ReasoningEffort::default(),
history_log_id: 1,
history_entry_count: 1000,
initial_messages: None,
@@ -314,7 +312,6 @@ mod tests {
let session_configured_event = SessionConfiguredEvent {
session_id: conversation_id,
model: "gpt-4o".to_string(),
reasoning_effort: ReasoningEffort::default(),
history_log_id: 1,
history_entry_count: 1000,
initial_messages: None,
@@ -345,7 +342,6 @@ mod tests {
"msg": {
"session_id": session_configured_event.session_id,
"model": session_configured_event.model,
"reasoning_effort": session_configured_event.reasoning_effort,
"history_log_id": session_configured_event.history_log_id,
"history_entry_count": session_configured_event.history_entry_count,
"type": "session_configured",

View File

@@ -24,7 +24,6 @@ use codex_protocol::mcp_protocol::RemoveConversationListenerParams;
use codex_protocol::mcp_protocol::ResumeConversationParams;
use codex_protocol::mcp_protocol::SendUserMessageParams;
use codex_protocol::mcp_protocol::SendUserTurnParams;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use mcp_types::CallToolRequestParams;
use mcp_types::ClientCapabilities;
@@ -302,15 +301,6 @@ impl McpProcess {
self.send_request("userInfo", None).await
}
/// Send a `setDefaultModel` JSON-RPC request.
pub async fn send_set_default_model_request(
&mut self,
params: SetDefaultModelParams,
) -> anyhow::Result<i64> {
let params = Some(serde_json::to_value(params)?);
self.send_request("setDefaultModel", params).await
}
/// Send a `listConversations` JSON-RPC request.
pub async fn send_list_conversations_request(
&mut self,

View File

@@ -90,7 +90,6 @@ async fn test_codex_jsonrpc_conversation_flow() {
let NewConversationResponse {
conversation_id,
model,
reasoning_effort: _,
rollout_path: _,
} = new_conv_resp;
assert_eq!(model, "mock-model");

View File

@@ -59,7 +59,6 @@ async fn test_conversation_create_and_send_message_ok() {
let NewConversationResponse {
conversation_id,
model,
reasoning_effort: _,
rollout_path: _,
} = to_response::<NewConversationResponse>(new_conv_resp)
.expect("deserialize newConversation response");

View File

@@ -9,6 +9,5 @@ mod interrupt;
mod list_resume;
mod login;
mod send_message;
mod set_default_model;
mod user_agent;
mod user_info;

View File

@@ -1,62 +0,0 @@
use codex_core::config::ConfigToml;
use codex_protocol::config_types::ReasoningEffort;
use codex_protocol::mcp_protocol::SetDefaultModelParams;
use codex_protocol::mcp_protocol::SetDefaultModelResponse;
use mcp_test_support::McpProcess;
use mcp_test_support::to_response;
use mcp_types::JSONRPCResponse;
use mcp_types::RequestId;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn set_default_model_persists_overrides() {
let codex_home = TempDir::new().unwrap_or_else(|e| panic!("create tempdir: {e}"));
let mut mcp = McpProcess::new(codex_home.path())
.await
.expect("spawn mcp process");
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize())
.await
.expect("init timeout")
.expect("init failed");
let params = SetDefaultModelParams {
model: Some("o4-mini".to_string()),
reasoning_effort: Some(ReasoningEffort::High),
};
let request_id = mcp
.send_set_default_model_request(params)
.await
.expect("send setDefaultModel");
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await
.expect("setDefaultModel timeout")
.expect("setDefaultModel response");
let _: SetDefaultModelResponse =
to_response(resp).expect("deserialize setDefaultModel response");
let config_path = codex_home.path().join("config.toml");
let config_contents = tokio::fs::read_to_string(&config_path)
.await
.expect("read config.toml");
let config_toml: ConfigToml = toml::from_str(&config_contents).expect("parse config.toml");
assert_eq!(
ConfigToml {
model: Some("o4-mini".to_string()),
model_reasoning_effort: Some(ReasoningEffort::High),
..Default::default()
},
config_toml,
);
}

View File

@@ -40,7 +40,6 @@ pub fn generate_ts(out_dir: &Path, prettier: Option<&Path>) -> Result<()> {
codex_protocol::mcp_protocol::ApplyPatchApprovalResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::ExecCommandApprovalResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GetUserSavedConfigResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::SetDefaultModelResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::GetUserAgentResponse::export_all_to(out_dir)?;
codex_protocol::mcp_protocol::UserInfoResponse::export_all_to(out_dir)?;

View File

@@ -153,11 +153,6 @@ pub enum ClientRequest {
#[serde(rename = "id")]
request_id: RequestId,
},
SetDefaultModel {
#[serde(rename = "id")]
request_id: RequestId,
params: SetDefaultModelParams,
},
GetUserAgent {
#[serde(rename = "id")]
request_id: RequestId,
@@ -222,8 +217,6 @@ pub struct NewConversationParams {
pub struct NewConversationResponse {
pub conversation_id: ConversationId,
pub model: String,
/// Note this could be ignored by the model.
pub reasoning_effort: ReasoningEffort,
pub rollout_path: PathBuf,
}
@@ -421,19 +414,6 @@ pub struct GetUserSavedConfigResponse {
pub config: UserSavedConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelParams {
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<ReasoningEffort>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetDefaultModelResponse {}
/// UserSavedConfig contains a subset of the config. It is meant to expose mcp
/// client-configurable settings that can be specified in the NewConversation
/// and SendUserTurn requests.

View File

@@ -1081,9 +1081,6 @@ pub struct SessionConfiguredEvent {
/// Tell the client what model is being queried.
pub model: String,
/// The effort the model is putting into reasoning about the user's request.
pub reasoning_effort: ReasoningEffortConfig,
/// Identifier of the history log file (inode on Unix, 0 otherwise).
pub history_log_id: u64,
@@ -1172,7 +1169,6 @@ mod tests {
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: conversation_id,
model: "codex-mini-latest".to_string(),
reasoning_effort: ReasoningEffortConfig::default(),
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
@@ -1186,7 +1182,6 @@ mod tests {
"type": "session_configured",
"session_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"model": "codex-mini-latest",
"reasoning_effort": "medium",
"history_log_id": 0,
"history_entry_count": 0,
"rollout_path": format!("{}", rollout_file.path().display()),

View File

@@ -14,7 +14,6 @@ use codex_core::config::Config;
use codex_core::config::persist_model_selection;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::TokenUsage;
use codex_core::protocol_config_types::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
use crossterm::event::KeyCode;
@@ -298,7 +297,7 @@ impl App {
self.chat_widget.apply_file_search_result(query, matches);
}
AppEvent::UpdateReasoningEffort(effort) => {
self.on_update_reasoning_effort(effort);
self.chat_widget.set_reasoning_effort(effort);
}
AppEvent::UpdateModel(model) => {
self.chat_widget.set_model(model.clone());
@@ -337,20 +336,6 @@ impl App {
}
}
fn on_update_reasoning_effort(&mut self, effort: ReasoningEffortConfig) {
let changed = self.config.model_reasoning_effort != effort;
self.chat_widget.set_reasoning_effort(effort);
self.config.model_reasoning_effort = effort;
if changed {
let show_hint = self.model_saved_to_profile || self.model_saved_to_global;
self.model_saved_to_profile = false;
self.model_saved_to_global = false;
if show_hint {
self.show_model_save_hint();
}
}
}
async fn persist_model_shortcut(&mut self) {
enum SaveScope<'a> {
Profile(&'a str),
@@ -491,67 +476,3 @@ impl App {
};
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::app_backtrack::BacktrackState;
use crate::chatwidget::tests::make_chatwidget_manual_with_sender;
use crate::file_search::FileSearchManager;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use ratatui::text::Line;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender();
let config = chat_widget.config_ref().clone();
let server = Arc::new(ConversationManager::with_auth(CodexAuth::from_api_key(
"Test API Key",
)));
let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone());
App {
server,
app_event_tx,
chat_widget,
config,
active_profile: None,
model_saved_to_profile: false,
model_saved_to_global: false,
file_search,
transcript_lines: Vec::<Line<'static>>::new(),
overlay: None,
deferred_history_lines: Vec::new(),
has_emitted_history_lines: false,
enhanced_keys_supported: false,
commit_anim_running: Arc::new(AtomicBool::new(false)),
backtrack: BacktrackState::default(),
}
}
#[test]
fn update_reasoning_effort_updates_config_and_resets_flags() {
let mut app = make_test_app();
app.model_saved_to_profile = true;
app.model_saved_to_global = true;
app.config.model_reasoning_effort = ReasoningEffortConfig::Medium;
app.chat_widget
.set_reasoning_effort(ReasoningEffortConfig::Medium);
app.on_update_reasoning_effort(ReasoningEffortConfig::High);
assert_eq!(
app.config.model_reasoning_effort,
ReasoningEffortConfig::High
);
assert_eq!(
app.chat_widget.config_ref().model_reasoning_effort,
ReasoningEffortConfig::High
);
assert!(!app.model_saved_to_profile);
assert!(!app.model_saved_to_global);
}
}

View File

@@ -1451,4 +1451,4 @@ fn extract_first_bold(s: &str) -> Option<String> {
}
#[cfg(test)]
pub(crate) mod tests;
mod tests;

View File

@@ -138,7 +138,6 @@ fn resumed_initial_messages_render_history() {
let configured = codex_core::protocol::SessionConfiguredEvent {
session_id: conversation_id,
model: "test-model".to_string(),
reasoning_effort: ReasoningEffortConfig::default(),
history_log_id: 0,
history_entry_count: 0,
initial_messages: Some(vec![
@@ -247,17 +246,6 @@ fn make_chatwidget_manual() -> (
(widget, rx, op_rx)
}
pub(crate) fn make_chatwidget_manual_with_sender() -> (
ChatWidget,
AppEventSender,
tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
tokio::sync::mpsc::UnboundedReceiver<Op>,
) {
let (widget, rx, op_rx) = make_chatwidget_manual();
let app_event_tx = widget.app_event_tx.clone();
(widget, app_event_tx, rx, op_rx)
}
fn drain_insert_history(
rx: &mut tokio::sync::mpsc::UnboundedReceiver<AppEvent>,
) -> Vec<Vec<ratatui::text::Line<'static>>> {

View File

@@ -601,7 +601,6 @@ pub(crate) fn new_session_info(
) -> PlainHistoryCell {
let SessionConfiguredEvent {
model,
reasoning_effort: _,
session_id: _,
history_log_id: _,
history_entry_count: _,

View File

@@ -32,7 +32,7 @@ pub(crate) struct StatusIndicatorWidget {
}
// Format elapsed seconds into a compact human-friendly form used by the status line.
// Examples: 0s, 59s, 1m 00s, 59m 59s, 1h 00m 00s, 2h 03m 09s
// Examples: 0s, 59s, 1m00s, 59m59s, 1h00m00s, 2h03m09s
fn fmt_elapsed_compact(elapsed_secs: u64) -> String {
if elapsed_secs < 60 {
return format!("{elapsed_secs}s");
@@ -40,12 +40,12 @@ fn fmt_elapsed_compact(elapsed_secs: u64) -> String {
if elapsed_secs < 3600 {
let minutes = elapsed_secs / 60;
let seconds = elapsed_secs % 60;
return format!("{minutes}m {seconds:02}s");
return format!("{minutes}m{seconds:02}s");
}
let hours = elapsed_secs / 3600;
let minutes = (elapsed_secs % 3600) / 60;
let seconds = elapsed_secs % 60;
format!("{hours}h {minutes:02}m {seconds:02}s")
format!("{hours}h{minutes:02}m{seconds:02}s")
}
impl StatusIndicatorWidget {
@@ -209,13 +209,13 @@ mod tests {
assert_eq!(fmt_elapsed_compact(0), "0s");
assert_eq!(fmt_elapsed_compact(1), "1s");
assert_eq!(fmt_elapsed_compact(59), "59s");
assert_eq!(fmt_elapsed_compact(60), "1m 00s");
assert_eq!(fmt_elapsed_compact(61), "1m 01s");
assert_eq!(fmt_elapsed_compact(3 * 60 + 5), "3m 05s");
assert_eq!(fmt_elapsed_compact(59 * 60 + 59), "59m 59s");
assert_eq!(fmt_elapsed_compact(3600), "1h 00m 00s");
assert_eq!(fmt_elapsed_compact(3600 + 60 + 1), "1h 01m 01s");
assert_eq!(fmt_elapsed_compact(25 * 3600 + 2 * 60 + 3), "25h 02m 03s");
assert_eq!(fmt_elapsed_compact(60), "1m00s");
assert_eq!(fmt_elapsed_compact(61), "1m01s");
assert_eq!(fmt_elapsed_compact(3 * 60 + 5), "3m05s");
assert_eq!(fmt_elapsed_compact(59 * 60 + 59), "59m59s");
assert_eq!(fmt_elapsed_compact(3600), "1h00m00s");
assert_eq!(fmt_elapsed_compact(3600 + 60 + 1), "1h01m01s");
assert_eq!(fmt_elapsed_compact(25 * 3600 + 2 * 60 + 3), "25h02m03s");
}
#[test]

View File

@@ -1,4 +1,4 @@
{"ts":"2025-08-09T15:51:04.827Z","dir":"meta","kind":"session_start","cwd":"/Users/easong/code/codex/codex-rs","model":"gpt-5","reasoning_effort":"medium","model_provider_id":"openai","model_provider_name":"OpenAI"}
{"ts":"2025-08-09T15:51:04.827Z","dir":"meta","kind":"session_start","cwd":"/Users/easong/code/codex/codex-rs","model":"gpt-5","model_provider_id":"openai","model_provider_name":"OpenAI"}
{"ts":"2025-08-09T15:51:04.827Z","dir":"to_tui","kind":"key_event","event":"KeyEvent { code: Char('c'), modifiers: KeyModifiers(0x0), kind: Press, state: KeyEventState(0x0) }"}
{"ts":"2025-08-09T15:51:04.827Z","dir":"to_tui","kind":"key_event","event":"KeyEvent { code: Char('o'), modifiers: KeyModifiers(0x0), kind: Press, state: KeyEventState(0x0) }"}
{"ts":"2025-08-09T15:51:04.827Z","dir":"to_tui","kind":"key_event","event":"KeyEvent { code: Char('m'), modifiers: KeyModifiers(0x0), kind: Press, state: KeyEventState(0x0) }"}
@@ -34,7 +34,7 @@
{"ts":"2025-08-09T15:51:04.829Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
{"ts":"2025-08-09T15:51:04.829Z","dir":"to_tui","kind":"log_line","line":"[INFO codex_core::codex] resume_path: None"}
{"ts":"2025-08-09T15:51:04.830Z","dir":"to_tui","kind":"app_event","variant":"Redraw"}
{"ts":"2025-08-09T15:51:04.856Z","dir":"to_tui","kind":"codex_event","payload":{"id":"0","msg":{"type":"session_configured","session_id":"d126e3d0-80ed-480a-be8c-09d97ff602cf","model":"gpt-5","reasoning_effort":"medium","history_log_id":2532619,"history_entry_count":339,"rollout_path":"/tmp/codex-test-rollout.jsonl"}}}
{"ts":"2025-08-09T15:51:04.856Z","dir":"to_tui","kind":"codex_event","payload":{"id":"0","msg":{"type":"session_configured","session_id":"d126e3d0-80ed-480a-be8c-09d97ff602cf","model":"gpt-5","history_log_id":2532619,"history_entry_count":339,"rollout_path":"/tmp/codex-test-rollout.jsonl"}}}
{"ts":"2025-08-09T15:51:04.856Z","dir":"to_tui","kind":"insert_history","lines":9}
{"ts":"2025-08-09T15:51:04.857Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
{"ts":"2025-08-09T15:51:04.857Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
@@ -16447,7 +16447,7 @@
{"ts":"2025-08-09T16:06:58.083Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
{"ts":"2025-08-09T16:06:58.085Z","dir":"to_tui","kind":"app_event","variant":"Redraw"}
{"ts":"2025-08-09T16:06:58.085Z","dir":"to_tui","kind":"log_line","line":"[INFO codex_core::codex] resume_path: None"}
{"ts":"2025-08-09T16:06:58.136Z","dir":"to_tui","kind":"codex_event","payload":{"id":"0","msg":{"type":"session_configured","session_id":"c7df96da-daec-4fe9-aed9-3cd19b7a6192","model":"gpt-5","reasoning_effort":"medium","history_log_id":2532619,"history_entry_count":342,"rollout_path":"/tmp/codex-test-rollout.jsonl"}}}
{"ts":"2025-08-09T16:06:58.136Z","dir":"to_tui","kind":"codex_event","payload":{"id":"0","msg":{"type":"session_configured","session_id":"c7df96da-daec-4fe9-aed9-3cd19b7a6192","model":"gpt-5","history_log_id":2532619,"history_entry_count":342,"rollout_path":"/tmp/codex-test-rollout.jsonl"}}}
{"ts":"2025-08-09T16:06:58.136Z","dir":"to_tui","kind":"insert_history","lines":9}
{"ts":"2025-08-09T16:06:58.136Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}
{"ts":"2025-08-09T16:06:58.136Z","dir":"to_tui","kind":"app_event","variant":"RequestRedraw"}

View File

@@ -54,22 +54,22 @@ Send a `tools/list` request and you will see that there are two tools available:
Property | Type | Description
-------------------|----------|----------------------------------------------------------------------------------------------------------
**`prompt`** (required) | string | The initial user prompt to start the Codex conversation.
`approval-policy` | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `never`.
`base-instructions` | string | The set of instructions to use instead of the default ones.
`config` | object | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in `$CODEX_HOME/config.toml`.
`cwd` | string | Working directory for the session. If relative, resolved against the server process's current directory.
`include-plan-tool` | boolean | Whether to include the plan tool in the conversation.
`model` | string | Optional override for the model name (e.g. `o3`, `o4-mini`).
`profile` | string | Configuration profile from `config.toml` to specify default options.
`sandbox` | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.
approval-policy | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `never`.
base-instructions | string | The set of instructions to use instead of the default ones.
config | object | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in CODEX_HOME/config.toml.
cwd | string | Working directory for the session. If relative, resolved against the server process's current directory.
include-plan-tool | boolean | Whether to include the plan tool in the conversation.
model | string | Optional override for the model name (e.g. "o3", "o4-mini").
profile | string | Configuration profile from config.toml to specify default options.
**prompt** (required) | string | The initial user prompt to start the Codex conversation.
sandbox | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`.
**`codex-reply`** - Continue a Codex session by providing the conversation id and prompt. The `codex-reply` tool takes the following properties:
**`codex-reply`** - Continue a Codex session by providing the session id and prompt. The `codex-reply` tool takes the following properties:
Property | Type | Description
-----------|--------|---------------------------------------------------------------
**`prompt`** (required) | string | The next user prompt to continue the Codex conversation.
**`conversationId`** (required) | string | The id of the conversation to continue.
**prompt** (required) | string | The next user prompt to continue the Codex conversation.
**conversationId** (required) | string | The id of the conversation to continue.
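For context, both tools are reached through the standard MCP tools/call request. A hypothetical call to the codex tool assembled with serde_json (the argument names mirror the property table above; the values are placeholders, not from the repo):

use serde_json::json;

fn main() {
    // Hypothetical MCP tools/call payload for the `codex` tool.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "codex",
            "arguments": {
                "prompt": "Explain the layout of this repository",
                "approval-policy": "never",
                "sandbox": "read-only"
            }
        }
    });
    println!("{}", serde_json::to_string_pretty(&request).unwrap());
}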
### Trying it Out
> [!TIP]