Compare commits

2 Commits

Author          SHA1        Message                              Date
easong-openai   4d36208931  better?                              2025-07-28 18:03:04 -07:00
easong-openai   d276932354  add --experimental-prompt support    2025-07-28 17:32:59 -07:00

64 changed files with 1939 additions and 2670 deletions

View File

@@ -95,12 +95,6 @@ codex login
If you complete the process successfully, you should have a `~/.codex/auth.json` file that contains the credentials that Codex will use.
To verify whether you are currently logged in, run:
```
codex login status
```
If you encounter problems with the login flow, please comment on <https://github.com/openai/codex/issues/1243>.
<details>

codex-rs/Cargo.lock (generated, 4 changed lines)
View File

@@ -673,9 +673,7 @@ dependencies = [
"async-channel",
"base64 0.22.1",
"bytes",
"chrono",
"codex-apply-patch",
"codex-login",
"codex-mcp-client",
"core_test_support",
"dirs",
@@ -824,7 +822,6 @@ dependencies = [
"serde",
"serde_json",
"shlex",
"strum_macros 0.27.2",
"tempfile",
"tokio",
"tokio-test",
@@ -863,6 +860,7 @@ dependencies = [
"shlex",
"strum 0.27.2",
"strum_macros 0.27.2",
"tempfile",
"tokio",
"tracing",
"tracing-appender",

View File

@@ -21,14 +21,10 @@ pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>(
let token =
get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?;
let account_id = token.account_id.ok_or_else(|| {
anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`")
});
let response = client
.get(&url)
.bearer_auth(&token.access_token)
.header("chatgpt-account-id", account_id?)
.header("chatgpt-account-id", &token.account_id)
.header("Content-Type", "application/json")
.header("User-Agent", "codex-cli")
.send()

View File

@@ -18,10 +18,7 @@ pub fn set_chatgpt_token_data(value: TokenData) {
/// Initialize the ChatGPT token from auth.json file
pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> {
let auth = codex_login::load_auth(codex_home)?;
if let Some(auth) = auth {
let token_data = auth.get_token_data().await?;
set_chatgpt_token_data(token_data);
}
let auth_json = codex_login::try_read_auth_json(codex_home).await?;
set_chatgpt_token_data(auth_json.tokens.clone());
Ok(())
}
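
For orientation, a sketch of the shapes the new path assumes: `try_read_auth_json` yields the parsed `auth.json`, whose `tokens` are cached directly rather than going through the old async `get_token_data()` refresh. The field names below are copied from the `AuthDotJson`/`TokenData` literals in the deleted test helper further down in this diff; the real `codex-login` types may differ.

```rust
// Sketch only: struct shapes mirror the AuthDotJson/TokenData literals in
// the deleted test helper later in this diff; actual codex-login types may differ.
use chrono::{DateTime, Utc};

pub struct TokenData {
    pub id_token: String,
    pub access_token: String,
    pub refresh_token: String,
    pub account_id: Option<String>,
}

pub struct AuthDotJson {
    pub tokens: TokenData,
    pub last_refresh: DateTime<Utc>,
    pub openai_api_key: Option<String>,
}
```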

View File

@@ -1,12 +1,25 @@
use codex_common::CliConfigOverrides;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_login::AuthMode;
use codex_login::load_auth;
use codex_login::login_with_chatgpt;
pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
let cli_overrides = match cli_config_overrides.parse_overrides() {
Ok(v) => v,
Err(e) => {
eprintln!("Error parsing -c overrides: {e}");
std::process::exit(1);
}
};
let config_overrides = ConfigOverrides::default();
let config = match Config::load_with_cli_overrides(cli_overrides, config_overrides) {
Ok(config) => config,
Err(e) => {
eprintln!("Error loading configuration: {e}");
std::process::exit(1);
}
};
let capture_output = false;
match login_with_chatgpt(&config.codex_home, capture_output).await {
@@ -20,77 +33,3 @@ pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) ->
}
}
}
pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! {
let config = load_config_or_exit(cli_config_overrides);
match load_auth(&config.codex_home) {
Ok(Some(auth)) => match auth.mode {
AuthMode::ApiKey => {
if let Some(api_key) = auth.api_key.as_deref() {
eprintln!("Logged in using an API key - {}", safe_format_key(api_key));
} else {
eprintln!("Logged in using an API key");
}
std::process::exit(0);
}
AuthMode::ChatGPT => {
eprintln!("Logged in using ChatGPT");
std::process::exit(0);
}
},
Ok(None) => {
eprintln!("Not logged in");
std::process::exit(1);
}
Err(e) => {
eprintln!("Error checking login status: {e}");
std::process::exit(1);
}
}
}
fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config {
let cli_overrides = match cli_config_overrides.parse_overrides() {
Ok(v) => v,
Err(e) => {
eprintln!("Error parsing -c overrides: {e}");
std::process::exit(1);
}
};
let config_overrides = ConfigOverrides::default();
match Config::load_with_cli_overrides(cli_overrides, config_overrides) {
Ok(config) => config,
Err(e) => {
eprintln!("Error loading configuration: {e}");
std::process::exit(1);
}
}
}
fn safe_format_key(key: &str) -> String {
if key.len() <= 13 {
return "***".to_string();
}
let prefix = &key[..8];
let suffix = &key[key.len() - 5..];
format!("{prefix}***{suffix}")
}
#[cfg(test)]
mod tests {
use super::safe_format_key;
#[test]
fn formats_long_key() {
let key = "sk-proj-1234567890ABCDE";
assert_eq!(safe_format_key(key), "sk-proj-***ABCDE");
}
#[test]
fn short_key_returns_stars() {
let key = "sk-proj-12345";
assert_eq!(safe_format_key(key), "***");
}
}

View File

@@ -7,7 +7,6 @@ use codex_chatgpt::apply_command::ApplyCommand;
use codex_chatgpt::apply_command::run_apply_command;
use codex_cli::LandlockCommand;
use codex_cli::SeatbeltCommand;
use codex_cli::login::run_login_status;
use codex_cli::login::run_login_with_chatgpt;
use codex_cli::proto;
use codex_common::CliConfigOverrides;
@@ -44,7 +43,7 @@ enum Subcommand {
#[clap(visible_alias = "e")]
Exec(ExecCli),
/// Manage login.
/// Login with ChatGPT.
Login(LoginCommand),
/// Experimental: run Codex as an MCP server.
@@ -91,15 +90,6 @@ enum DebugCommand {
struct LoginCommand {
#[clap(skip)]
config_overrides: CliConfigOverrides,
#[command(subcommand)]
action: Option<LoginSubcommand>,
}
#[derive(Debug, clap::Subcommand)]
enum LoginSubcommand {
/// Show login status.
Status,
}
fn main() -> anyhow::Result<()> {
@@ -116,7 +106,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
None => {
let mut tui_cli = cli.interactive;
prepend_config_flags(&mut tui_cli.config_overrides, cli.config_overrides);
let usage = codex_tui::run_main(tui_cli, codex_linux_sandbox_exe).await?;
let usage = codex_tui::run_main(tui_cli, codex_linux_sandbox_exe)?;
println!("{}", codex_core::protocol::FinalOutput::from(usage));
}
Some(Subcommand::Exec(mut exec_cli)) => {
@@ -128,14 +118,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()
}
Some(Subcommand::Login(mut login_cli)) => {
prepend_config_flags(&mut login_cli.config_overrides, cli.config_overrides);
match login_cli.action {
Some(LoginSubcommand::Status) => {
run_login_status(login_cli.config_overrides).await;
}
None => {
run_login_with_chatgpt(login_cli.config_overrides).await;
}
}
run_login_with_chatgpt(login_cli.config_overrides).await;
}
Some(Subcommand::Proto(mut proto_cli)) => {
prepend_config_flags(&mut proto_cli.config_overrides, cli.config_overrides);

View File

@@ -9,7 +9,6 @@ use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::protocol::Submission;
use codex_core::util::notify_on_sigint;
use codex_login::load_auth;
use tokio::io::AsyncBufReadExt;
use tokio::io::BufReader;
use tracing::error;
@@ -36,9 +35,8 @@ pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> {
.map_err(anyhow::Error::msg)?;
let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?;
let auth = load_auth(&config.codex_home)?;
let ctrl_c = notify_on_sigint();
let CodexSpawnOk { codex, .. } = Codex::spawn(config, auth, ctrl_c.clone()).await?;
let CodexSpawnOk { codex, .. } = Codex::spawn(config, ctrl_c.clone()).await?;
let codex = Arc::new(codex);
// Task that reads JSON lines from stdin and forwards to Submission Queue

View File

@@ -110,15 +110,12 @@ stream_idle_timeout_ms = 300000 # 5m idle timeout
```
#### request_max_retries
How many times Codex will retry a failed HTTP request to the model provider. Defaults to `4`.
#### stream_max_retries
Number of times Codex will attempt to reconnect when a streaming response is interrupted. Defaults to `10`.
#### stream_idle_timeout_ms
How long Codex will wait for activity on a streaming response before treating the connection as lost. Defaults to `300_000` (5 minutes).
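Together, the three knobs would be tuned per provider in `config.toml` like this (a sketch using the documented defaults; the `[model_providers.openai]` table name is illustrative):

```toml
# Sketch: per-provider retry/timeout tuning with the documented defaults.
[model_providers.openai]
request_max_retries = 4         # retry a failed HTTP request up to 4 times
stream_max_retries = 10         # reconnect a dropped SSE stream up to 10 times
stream_idle_timeout_ms = 300000 # treat 5 minutes of silence as a lost connection
```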
## model_provider

View File

@@ -17,8 +17,6 @@ base64 = "0.22"
bytes = "1.10.1"
codex-apply-patch = { path = "../apply-patch" }
codex-mcp-client = { path = "../mcp-client" }
chrono = { version = "0.4", features = ["serde"] }
codex-login = { path = "../login" }
dirs = "6"
env-flags = "0.1.1"
eventsource-stream = "0.2.3"

View File

@@ -30,7 +30,6 @@ use crate::util::backoff;
pub(crate) async fn stream_chat_completions(
prompt: &Prompt,
model: &str,
include_plan_tool: bool,
client: &reqwest::Client,
provider: &ModelProviderInfo,
) -> Result<ResponseStream> {
@@ -40,10 +39,6 @@ pub(crate) async fn stream_chat_completions(
let full_instructions = prompt.get_full_instructions(model);
messages.push(json!({"role": "system", "content": full_instructions}));
if let Some(instr) = &prompt.user_instructions {
messages.push(json!({"role": "user", "content": instr}));
}
for item in &prompt.input {
match item {
ResponseItem::Message { role, content, .. } => {
@@ -110,7 +105,7 @@ pub(crate) async fn stream_chat_completions(
}
}
let tools_json = create_tools_json_for_chat_completions_api(prompt, model, include_plan_tool)?;
let tools_json = create_tools_json_for_chat_completions_api(prompt, model)?;
let payload = json!({
"model": model,
"messages": messages,

View File

@@ -3,8 +3,6 @@ use std::path::Path;
use std::time::Duration;
use bytes::Bytes;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use eventsource_stream::Eventsource;
use futures::prelude::*;
use reqwest::StatusCode;
@@ -30,12 +28,10 @@ use crate::config::Config;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::EnvVarError;
use crate::error::Result;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::openai_tools::create_tools_json_for_responses_api;
use crate::protocol::TokenUsage;
@@ -45,7 +41,6 @@ use std::sync::Arc;
#[derive(Clone)]
pub struct ModelClient {
config: Arc<Config>,
auth: Option<CodexAuth>,
client: reqwest::Client,
provider: ModelProviderInfo,
session_id: Uuid,
@@ -56,7 +51,6 @@ pub struct ModelClient {
impl ModelClient {
pub fn new(
config: Arc<Config>,
auth: Option<CodexAuth>,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
@@ -64,7 +58,6 @@ impl ModelClient {
) -> Self {
Self {
config,
auth,
client: reqwest::Client::new(),
provider,
session_id,
@@ -84,7 +77,6 @@ impl ModelClient {
let response_stream = stream_chat_completions(
prompt,
&self.config.model,
self.config.include_plan_tool,
&self.client,
&self.provider,
)
@@ -122,60 +114,28 @@ impl ModelClient {
return stream_from_fixture(path, self.provider.clone()).await;
}
let auth = self.auth.as_ref().ok_or_else(|| {
CodexErr::EnvVar(EnvVarError {
var: "OPENAI_API_KEY".to_string(),
instructions: Some("Create an API key (https://platform.openai.com) and export it as an environment variable.".to_string()),
})
})?;
let store = prompt.store && auth.mode != AuthMode::ChatGPT;
let base_url = match self.provider.base_url.clone() {
Some(url) => url,
None => match auth.mode {
AuthMode::ChatGPT => "https://chatgpt.com/backend-api/codex".to_string(),
AuthMode::ApiKey => "https://api.openai.com/v1".to_string(),
},
};
let token = auth.get_token().await?;
let full_instructions = prompt.get_full_instructions(&self.config.model);
let tools_json = create_tools_json_for_responses_api(
prompt,
&self.config.model,
self.config.include_plan_tool,
)?;
let tools_json = create_tools_json_for_responses_api(prompt, &self.config.model)?;
let reasoning = create_reasoning_param_for_request(&self.config, self.effort, self.summary);
// Request encrypted COT if we are not storing responses,
// otherwise reasoning items will be referenced by ID
let include: Vec<String> = if !store && reasoning.is_some() {
let include = if !prompt.store && reasoning.is_some() {
vec!["reasoning.encrypted_content".to_string()]
} else {
vec![]
};
let mut input_with_instructions = Vec::with_capacity(prompt.input.len() + 1);
if let Some(ui) = &prompt.user_instructions {
input_with_instructions.push(ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: ui.clone() }],
});
}
input_with_instructions.extend(prompt.input.clone());
let payload = ResponsesApiRequest {
model: &self.config.model,
instructions: &full_instructions,
input: &input_with_instructions,
input: &prompt.input,
tools: &tools_json,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning,
store,
store: prompt.store,
// TODO: make this configurable
stream: true,
include,
};
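
After this change the request body is built straight from `prompt.input` and `prompt.store`, with no synthesized user-instructions message and no auth-mode gating. Roughly, the serialized payload looks like this (a sketch; the field list mirrors the `ResponsesApiRequest` literal above, the values are illustrative):

```rust
// Sketch of the Responses API payload shape after this change.
use serde_json::json;

fn main() {
    let payload = json!({
        "model": "o3",
        "instructions": "<full instructions>",
        "input": [],                   // prompt.input, used as-is
        "tools": [],
        "tool_choice": "auto",
        "parallel_tool_calls": false,
        "reasoning": null,
        "store": true,                 // prompt.store, no longer gated on AuthMode
        "stream": true,
        "include": []                  // "reasoning.encrypted_content" when !store && reasoning
    });
    assert!(payload["stream"].as_bool().unwrap());
}
```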
@@ -188,21 +148,17 @@ impl ModelClient {
let mut attempt = 0;
let max_retries = self.provider.request_max_retries();
loop {
attempt += 1;
let req_builder = self
.client
.post(format!("{base_url}/responses"))
.provider
.create_request_builder(&self.client)?
.header("OpenAI-Beta", "responses=experimental")
.header("session_id", self.session_id.to_string())
.bearer_auth(&token)
.header(reqwest::header::ACCEPT, "text/event-stream")
.json(&payload);
let req_builder = self.provider.apply_http_headers(req_builder);
let res = req_builder.send().await;
if let Ok(resp) = &res {
trace!(
@@ -611,7 +567,7 @@ mod tests {
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
base_url: "https://test.com".to_string(),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
wire_api: WireApi::Responses,
@@ -621,7 +577,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_auth: false,
};
let events = collect_events(
@@ -671,7 +626,7 @@ mod tests {
let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n");
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
base_url: "https://test.com".to_string(),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
wire_api: WireApi::Responses,
@@ -681,7 +636,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_auth: false,
};
let events = collect_events(&[sse1.as_bytes()], provider).await;
@@ -774,7 +728,7 @@ mod tests {
let provider = ModelProviderInfo {
name: "test".to_string(),
base_url: Some("https://test.com".to_string()),
base_url: "https://test.com".to_string(),
env_key: Some("TEST_API_KEY".to_string()),
env_key_instructions: None,
wire_api: WireApi::Responses,
@@ -784,7 +738,6 @@ mod tests {
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(1000),
requires_auth: false,
};
let out = run_sse(evs, provider).await;

View File

@@ -44,6 +44,9 @@ impl Prompt {
.as_deref()
.unwrap_or(BASE_INSTRUCTIONS);
let mut sections: Vec<&str> = vec![base];
if let Some(ref user) = self.user_instructions {
sections.push(user);
}
if model.starts_with("gpt-4.1") {
sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
}
@@ -185,19 +188,3 @@ impl Stream for ResponseStream {
self.rx_event.poll_recv(cx)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
user_instructions: Some("custom instruction".to_string()),
..Default::default()
};
let expected = format!("{BASE_INSTRUCTIONS}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}");
let full = prompt.get_full_instructions("gpt-4.1");
assert_eq!(full, expected);
}
}
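
The hunk above folds user instructions into the joined instruction sections, which is presumably why the old test asserting they were excluded is deleted. A minimal sketch of the new assembly, with placeholder constants standing in for the real instruction strings:

```rust
// Sketch of get_full_instructions' section assembly after this change.
// The constants are placeholders; the "\n" join matches the removed test.
fn full_instructions(
    base_override: Option<&str>,
    user_instructions: Option<&str>,
    model: &str,
) -> String {
    const BASE_INSTRUCTIONS: &str = "<base instructions>";
    const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = "<apply_patch instructions>";

    let mut sections: Vec<&str> = vec![base_override.unwrap_or(BASE_INSTRUCTIONS)];
    if let Some(user) = user_instructions {
        sections.push(user);
    }
    if model.starts_with("gpt-4.1") {
        sections.push(APPLY_PATCH_TOOL_INSTRUCTIONS);
    }
    sections.join("\n")
}

fn main() {
    let full = full_instructions(None, Some("be nice"), "gpt-4.1");
    assert_eq!(full, "<base instructions>\nbe nice\n<apply_patch instructions>");
}
```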

View File

@@ -15,7 +15,6 @@ use async_channel::Sender;
use codex_apply_patch::ApplyPatchAction;
use codex_apply_patch::MaybeApplyPatchVerified;
use codex_apply_patch::maybe_parse_apply_patch_verified;
use codex_login::CodexAuth;
use futures::prelude::*;
use mcp_types::CallToolResult;
use serde::Serialize;
@@ -56,7 +55,6 @@ use crate::models::ReasoningItemReasoningSummary;
use crate::models::ResponseInputItem;
use crate::models::ResponseItem;
use crate::models::ShellToolCallParams;
use crate::plan_tool::handle_update_plan;
use crate::project_doc::get_user_instructions;
use crate::protocol::AgentMessageDeltaEvent;
use crate::protocol::AgentMessageEvent;
@@ -104,11 +102,7 @@ pub struct CodexSpawnOk {
impl Codex {
/// Spawn a new [`Codex`] and initialize the session.
pub async fn spawn(
config: Config,
auth: Option<CodexAuth>,
ctrl_c: Arc<Notify>,
) -> CodexResult<CodexSpawnOk> {
pub async fn spawn(config: Config, ctrl_c: Arc<Notify>) -> CodexResult<CodexSpawnOk> {
// experimental resume path (undocumented)
let resume_path = config.experimental_resume.clone();
info!("resume_path: {resume_path:?}");
@@ -137,7 +131,7 @@ impl Codex {
// Generate a unique ID for the lifetime of this Codex session.
let session_id = Uuid::new_v4();
tokio::spawn(submission_loop(
session_id, config, auth, rx_sub, tx_event, ctrl_c,
session_id, config, rx_sub, tx_event, ctrl_c,
));
let codex = Codex {
next_id: AtomicU64::new(0),
@@ -530,7 +524,6 @@ impl AgentTask {
async fn submission_loop(
mut session_id: Uuid,
config: Arc<Config>,
auth: Option<CodexAuth>,
rx_sub: Receiver<Submission>,
tx_event: Sender<Event>,
ctrl_c: Arc<Notify>,
@@ -642,7 +635,6 @@ async fn submission_loop(
let client = ModelClient::new(
config.clone(),
auth.clone(),
provider.clone(),
model_reasoning_effort,
model_reasoning_summary,
@@ -829,79 +821,6 @@ async fn submission_loop(
}
});
}
Op::SummarizeContext => {
let sess = match sess.as_ref() {
Some(sess) => sess,
None => {
send_no_session_event(sub.id).await;
continue;
}
};
// Create a summarization request as user input
const SUMMARIZATION_PROMPT: &str = r#"
You are the component that compacts a long coding session log into a structured memory object.
This memory will become the ONLY reference for continuing the task.
All critical facts, user intentions, tool results, and file operations must be captured.
Omit filler talk and commentary. Do not invent information; use "none" if evidence is missing.
Output ONLY the XML object below. No extra text.
<project_memory>
<mission>
<!-- One concise line describing the users main goal. -->
</mission>
<essentials>
<!-- Bullet-like facts the agent must retain: commands, APIs, paths, configs, tickets, rules. -->
<!-- Example:
- Build cmd: `npm run build`
- Repo branch: `feature/auth-refactor`
- API version: v2
-->
</essentials>
<workspace>
<!-- Record file interactions and key observations. -->
<!-- Example:
- CREATED: `tests/login.test.ts` initial test
- MODIFIED: `src/auth.ts` swapped jwt library
- DELETED: none
-->
</workspace>
<activity_log>
<!-- Key actions and tool outputs in the recent session. -->
<!-- Example:
- Ran `npm test` 1 failure in `User.test.ts`
- Queried `grep 'oldAPI'` 2 matches
-->
</activity_log>
<next_steps>
<!-- Stepwise plan; mark status. -->
<!-- Example:
1. [DONE] Identify old API usage
2. [NEXT] Refactor `auth.ts` to new API
3. [TODO] Update tests
-->
</next_steps>
</project_memory>
"#;
let summarization_prompt = vec![InputItem::Text {
text: SUMMARIZATION_PROMPT.to_string(),
}];
// Attempt to inject input into current task
if let Err(items) = sess.inject_input(summarization_prompt) {
run_task(sess.clone(), sub.id, items).await;
// only keep the last input item and clear the rest
let mut pending_input = sess.state.lock().unwrap().pending_input.clone();
pending_input.truncate(1);
sess.state.lock().unwrap().pending_input = pending_input;
}
}
Op::Shutdown => {
info!("Shutting down Codex instance");
@@ -1417,7 +1336,6 @@ async fn handle_function_call(
};
handle_container_exec_with_params(params, sess, sub_id, call_id).await
}
"update_plan" => handle_update_plan(sess, arguments, sub_id, call_id).await,
_ => {
match sess.mcp_connection_manager.parse_tool_name(&name) {
Some((server, tool_name)) => {

View File

@@ -6,7 +6,6 @@ use crate::config::Config;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::util::notify_on_sigint;
use codex_login::load_auth;
use tokio::sync::Notify;
use uuid::Uuid;
@@ -26,12 +25,11 @@ pub struct CodexConversation {
/// that callers can surface the information to the UI.
pub async fn init_codex(config: Config) -> anyhow::Result<CodexConversation> {
let ctrl_c = notify_on_sigint();
let auth = load_auth(&config.codex_home)?;
let CodexSpawnOk {
codex,
init_id,
session_id,
} = Codex::spawn(config, auth, ctrl_c.clone()).await?;
} = Codex::spawn(config, ctrl_c.clone()).await?;
// The first event must be `SessionInitialized`. Validate and forward it to
// the caller so that they can display it in the conversation history.

View File

@@ -143,9 +143,6 @@ pub struct Config {
/// Experimental rollout resume path (absolute path to .jsonl; undocumented).
pub experimental_resume: Option<PathBuf>,
/// Include an experimental plan tool that the model can use to update its current plan and status of each step.
pub include_plan_tool: bool,
}
impl Config {
@@ -369,7 +366,6 @@ pub struct ConfigOverrides {
pub config_profile: Option<String>,
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub include_plan_tool: Option<bool>,
}
impl Config {
@@ -392,7 +388,6 @@ impl Config {
config_profile: config_profile_key,
codex_linux_sandbox_exe,
base_instructions,
include_plan_tool,
} = overrides;
let config_profile = match config_profile_key.as_ref().or(cfg.profile.as_ref()) {
@@ -470,14 +465,9 @@ impl Config {
let experimental_resume = cfg.experimental_resume;
// Load base instructions override from a file if specified. If the
// path is relative, resolve it against the effective cwd so the
// behaviour matches other path-like config values.
let file_base_instructions = Self::get_base_instructions(
let base_instructions = base_instructions.or(Self::get_base_instructions(
cfg.experimental_instructions_file.as_ref(),
&resolved_cwd,
)?;
let base_instructions = base_instructions.or(file_base_instructions);
));
let config = Self {
model,
@@ -528,7 +518,6 @@ impl Config {
.unwrap_or("https://chatgpt.com/backend-api/".to_string()),
experimental_resume,
include_plan_tool: include_plan_tool.unwrap_or(false),
};
Ok(config)
}
@@ -550,46 +539,13 @@ impl Config {
})
}
fn get_base_instructions(
path: Option<&PathBuf>,
cwd: &Path,
) -> std::io::Result<Option<String>> {
let p = match path.as_ref() {
None => return Ok(None),
Some(p) => p,
};
fn get_base_instructions(path: Option<&PathBuf>) -> Option<String> {
let path = path.as_ref()?;
// Resolve relative paths against the provided cwd to make CLI
// overrides consistent regardless of where the process was launched
// from.
let full_path = if p.is_relative() {
cwd.join(p)
} else {
p.to_path_buf()
};
let contents = std::fs::read_to_string(&full_path).map_err(|e| {
std::io::Error::new(
e.kind(),
format!(
"failed to read experimental instructions file {}: {e}",
full_path.display()
),
)
})?;
let s = contents.trim().to_string();
if s.is_empty() {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!(
"experimental instructions file is empty: {}",
full_path.display()
),
))
} else {
Ok(Some(s))
}
std::fs::read_to_string(path)
.ok()
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
}
}
@@ -795,7 +751,7 @@ disable_response_storage = true
let openai_chat_completions_provider = ModelProviderInfo {
name: "OpenAI using Chat Completions".to_string(),
base_url: Some("https://api.openai.com/v1".to_string()),
base_url: "https://api.openai.com/v1".to_string(),
env_key: Some("OPENAI_API_KEY".to_string()),
wire_api: crate::WireApi::Chat,
env_key_instructions: None,
@@ -805,7 +761,6 @@ disable_response_storage = true
request_max_retries: Some(4),
stream_max_retries: Some(10),
stream_idle_timeout_ms: Some(300_000),
requires_auth: false,
};
let model_provider_map = {
let mut model_provider_map = built_in_model_providers();
@@ -836,7 +791,7 @@ disable_response_storage = true
///
/// 1. custom command-line argument, e.g. `--model o3`
/// 2. as part of a profile, where the `--profile` is specified via a CLI
/// (or in the config file itself)
/// (or in the config file itelf)
/// 3. as an entry in `config.toml`, e.g. `model = "o3"`
/// 4. the default value for a required field defined in code, e.g.,
/// `crate::flags::OPENAI_DEFAULT_MODEL`
@@ -886,7 +841,6 @@ disable_response_storage = true
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
},
o3_profile_config
);
@@ -935,7 +889,6 @@ disable_response_storage = true
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
};
assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -999,7 +952,6 @@ disable_response_storage = true
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
experimental_resume: None,
base_instructions: None,
include_plan_tool: false,
};
assert_eq!(expected_zdr_profile_config, zdr_profile_config);

View File

@@ -78,7 +78,7 @@ pub enum HistoryPersistence {
#[derive(Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Tui {}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default, Serialize)]
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
pub enum SandboxMode {
#[serde(rename = "read-only")]

View File

@@ -30,11 +30,10 @@ mod message_history;
mod model_provider_info;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;
mod models;
pub mod openai_api_key;
mod openai_model_info;
mod openai_tools;
pub mod plan_tool;
mod project_doc;
pub mod protocol;
mod rollout;

View File

@@ -1,5 +1,4 @@
use std::time::Duration;
use std::time::Instant;
use tracing::error;
@@ -8,7 +7,6 @@ use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::protocol::Event;
use crate::protocol::EventMsg;
use crate::protocol::McpInvocation;
use crate::protocol::McpToolCallBeginEvent;
use crate::protocol::McpToolCallEndEvent;
@@ -43,28 +41,21 @@ pub(crate) async fn handle_mcp_tool_call(
}
};
let invocation = McpInvocation {
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.clone(),
server: server.clone(),
tool: tool_name.clone(),
arguments: arguments_value.clone(),
};
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.clone(),
invocation: invocation.clone(),
});
notify_mcp_tool_call_event(sess, sub_id, tool_call_begin_event).await;
let start = Instant::now();
// Perform the tool call.
let result = sess
.call_tool(&server, &tool_name, arguments_value.clone(), timeout)
.call_tool(&server, &tool_name, arguments_value, timeout)
.await
.map_err(|e| format!("tool call error: {e}"));
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.clone(),
invocation,
duration: start.elapsed(),
result: result.clone(),
});

View File

@@ -12,6 +12,7 @@ use std::env::VarError;
use std::time::Duration;
use crate::error::EnvVarError;
use crate::openai_api_key::get_openai_api_key;
/// Value for the `OpenAI-Originator` header that is sent with requests to
/// OpenAI.
@@ -29,7 +30,7 @@ const DEFAULT_REQUEST_MAX_RETRIES: u64 = 4;
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum WireApi {
/// The Responses API exposed by OpenAI at `/v1/responses`.
/// The experimental "Responses" API exposed by OpenAI at `/v1/responses`.
Responses,
/// Regular Chat Completions compatible with `/v1/chat/completions`.
@@ -43,7 +44,7 @@ pub struct ModelProviderInfo {
/// Friendly display name.
pub name: String,
/// Base URL for the provider's OpenAI-compatible API.
pub base_url: Option<String>,
pub base_url: String,
/// Environment variable that stores the user's API key for this provider.
pub env_key: Option<String>,
@@ -77,10 +78,6 @@ pub struct ModelProviderInfo {
/// Idle timeout (in milliseconds) to wait for activity on a streaming response before treating
/// the connection as lost.
pub stream_idle_timeout_ms: Option<u64>,
/// Whether this provider requires some form of standard authentication (API key, ChatGPT token).
#[serde(default)]
pub requires_auth: bool,
}
impl ModelProviderInfo {
@@ -96,11 +93,11 @@ impl ModelProviderInfo {
&'a self,
client: &'a reqwest::Client,
) -> crate::error::Result<reqwest::RequestBuilder> {
let api_key = self.api_key()?;
let url = self.get_full_url();
let mut builder = client.post(url);
let api_key = self.api_key()?;
if let Some(key) = api_key {
builder = builder.bearer_auth(key);
}
@@ -120,15 +117,9 @@ impl ModelProviderInfo {
.join("&");
format!("?{full_params}")
});
let base_url = self
.base_url
.clone()
.unwrap_or("https://api.openai.com/v1".to_string());
let base_url = &self.base_url;
match self.wire_api {
WireApi::Responses => {
format!("{base_url}/responses{query_string}")
}
WireApi::Responses => format!("{base_url}/responses{query_string}"),
WireApi::Chat => format!("{base_url}/chat/completions{query_string}"),
}
}
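
Concretely, with the default OpenAI base URL and no `query_params`, the two wire APIs resolve to these endpoints (a small runnable sketch of `get_full_url`'s output):

```rust
fn main() {
    // URL shapes produced by get_full_url when no query params are set.
    let base_url = "https://api.openai.com/v1";
    assert_eq!(
        format!("{base_url}/responses"), // WireApi::Responses
        "https://api.openai.com/v1/responses"
    );
    assert_eq!(
        format!("{base_url}/chat/completions"), // WireApi::Chat
        "https://api.openai.com/v1/chat/completions"
    );
}
```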
@@ -136,10 +127,7 @@ impl ModelProviderInfo {
/// Apply provider-specific HTTP headers (both static and environment-based)
/// onto an existing `reqwest::RequestBuilder` and return the updated
/// builder.
pub fn apply_http_headers(
&self,
mut builder: reqwest::RequestBuilder,
) -> reqwest::RequestBuilder {
fn apply_http_headers(&self, mut builder: reqwest::RequestBuilder) -> reqwest::RequestBuilder {
if let Some(extra) = &self.http_headers {
for (k, v) in extra {
builder = builder.header(k, v);
@@ -164,7 +152,11 @@ impl ModelProviderInfo {
fn api_key(&self) -> crate::error::Result<Option<String>> {
match &self.env_key {
Some(env_key) => {
let env_value = std::env::var(env_key);
let env_value = if env_key == crate::openai_api_key::OPENAI_API_KEY_ENV_VAR {
get_openai_api_key().map_or_else(|| Err(VarError::NotPresent), Ok)
} else {
std::env::var(env_key)
};
env_value
.and_then(|v| {
if v.trim().is_empty() {
@@ -212,51 +204,47 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
// providers are bundled with Codex CLI, so we only include the OpenAI
// provider by default. Users are encouraged to add to `model_providers`
// in config.toml to add their own providers.
[(
"openai",
P {
name: "OpenAI".into(),
// Allow users to override the default OpenAI endpoint by
// exporting `OPENAI_BASE_URL`. This is useful when pointing
// Codex at a proxy, mock server, or Azure-style deployment
// without requiring a full TOML override for the built-in
// OpenAI provider.
base_url: std::env::var("OPENAI_BASE_URL")
.ok()
.filter(|v| !v.trim().is_empty()),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Responses,
query_params: None,
http_headers: Some(
[
(
"originator".to_string(),
OPENAI_ORIGINATOR_HEADER.to_string(),
),
("version".to_string(), env!("CARGO_PKG_VERSION").to_string()),
]
.into_iter()
.collect(),
),
env_http_headers: Some(
[
(
"OpenAI-Organization".to_string(),
"OPENAI_ORGANIZATION".to_string(),
),
("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
]
.into_iter()
.collect(),
),
// Use global defaults for retry/timeout unless overridden in config.toml.
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_auth: true,
},
)]
[
(
"openai",
P {
name: "OpenAI".into(),
// Allow users to override the default OpenAI endpoint by
// exporting `OPENAI_BASE_URL`. This is useful when pointing
// Codex at a proxy, mock server, or Azure-style deployment
// without requiring a full TOML override for the built-in
// OpenAI provider.
base_url: std::env::var("OPENAI_BASE_URL")
.ok()
.filter(|v| !v.trim().is_empty())
.unwrap_or_else(|| "https://api.openai.com/v1".to_string()),
env_key: Some("OPENAI_API_KEY".into()),
env_key_instructions: Some("Create an API key (https://platform.openai.com) and export it as an environment variable.".into()),
wire_api: WireApi::Responses,
query_params: None,
http_headers: Some(
[
("originator".to_string(), OPENAI_ORIGINATOR_HEADER.to_string()),
("version".to_string(), env!("CARGO_PKG_VERSION").to_string()),
]
.into_iter()
.collect(),
),
env_http_headers: Some(
[
("OpenAI-Organization".to_string(), "OPENAI_ORGANIZATION".to_string()),
("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
]
.into_iter()
.collect(),
),
// Use global defaults for retry/timeout unless overridden in config.toml.
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
},
),
]
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect()
@@ -276,7 +264,7 @@ base_url = "http://localhost:11434/v1"
"#;
let expected_provider = ModelProviderInfo {
name: "Ollama".into(),
base_url: Some("http://localhost:11434/v1".into()),
base_url: "http://localhost:11434/v1".into(),
env_key: None,
env_key_instructions: None,
wire_api: WireApi::Chat,
@@ -286,7 +274,6 @@ base_url = "http://localhost:11434/v1"
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_auth: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -303,7 +290,7 @@ query_params = { api-version = "2025-04-01-preview" }
"#;
let expected_provider = ModelProviderInfo {
name: "Azure".into(),
base_url: Some("https://xxxxx.openai.azure.com/openai".into()),
base_url: "https://xxxxx.openai.azure.com/openai".into(),
env_key: Some("AZURE_OPENAI_API_KEY".into()),
env_key_instructions: None,
wire_api: WireApi::Chat,
@@ -315,7 +302,6 @@ query_params = { api-version = "2025-04-01-preview" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_auth: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();
@@ -333,7 +319,7 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
"#;
let expected_provider = ModelProviderInfo {
name: "Example".into(),
base_url: Some("https://example.com".into()),
base_url: "https://example.com".into(),
env_key: Some("API_KEY".into()),
env_key_instructions: None,
wire_api: WireApi::Chat,
@@ -347,7 +333,6 @@ env_http_headers = { "X-Example-Env-Header" = "EXAMPLE_ENV_VAR" }
request_max_retries: None,
stream_max_retries: None,
stream_idle_timeout_ms: None,
requires_auth: false,
};
let provider: ModelProviderInfo = toml::from_str(azure_provider_toml).unwrap();

View File

@@ -0,0 +1,24 @@
use std::env;
use std::sync::LazyLock;
use std::sync::RwLock;
pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| {
let val = env::var(OPENAI_API_KEY_ENV_VAR)
.ok()
.and_then(|s| if s.is_empty() { None } else { Some(s) });
RwLock::new(val)
});
pub fn get_openai_api_key() -> Option<String> {
#![allow(clippy::unwrap_used)]
OPENAI_API_KEY.read().unwrap().clone()
}
pub fn set_openai_api_key(value: String) {
#![allow(clippy::unwrap_used)]
if !value.is_empty() {
*OPENAI_API_KEY.write().unwrap() = Some(value);
}
}
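
The module is a process-wide cell seeded from the environment; `set_openai_api_key` ignores empty strings, so a blank value cannot clobber a key read at startup. A self-contained sketch of that behavior (logic copied from the file above):

```rust
// Sketch: behavior of the new openai_api_key module, inlined so the
// example stands alone. LazyLock requires Rust 1.80+.
use std::sync::{LazyLock, RwLock};

static OPENAI_API_KEY: LazyLock<RwLock<Option<String>>> = LazyLock::new(|| {
    RwLock::new(std::env::var("OPENAI_API_KEY").ok().filter(|s| !s.is_empty()))
});

fn get_openai_api_key() -> Option<String> {
    OPENAI_API_KEY.read().unwrap().clone()
}

fn set_openai_api_key(value: String) {
    // Empty values are ignored, so a blank set cannot clear a cached key.
    if !value.is_empty() {
        *OPENAI_API_KEY.write().unwrap() = Some(value);
    }
}

fn main() {
    set_openai_api_key("sk-test-123".to_string());
    assert_eq!(get_openai_api_key().as_deref(), Some("sk-test-123"));

    set_openai_api_key(String::new());
    assert_eq!(get_openai_api_key().as_deref(), Some("sk-test-123"));
}
```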

View File

@@ -4,14 +4,13 @@ use std::collections::BTreeMap;
use std::sync::LazyLock;
use crate::client_common::Prompt;
use crate::plan_tool::PLAN_TOOL;
#[derive(Debug, Clone, Serialize)]
pub(crate) struct ResponsesApiTool {
pub(crate) name: &'static str,
pub(crate) description: &'static str,
pub(crate) strict: bool,
pub(crate) parameters: JsonSchema,
name: &'static str,
description: &'static str,
strict: bool,
parameters: JsonSchema,
}
/// When serialized as JSON, this produces a valid "Tool" in the OpenAI
@@ -75,7 +74,6 @@ static DEFAULT_CODEX_MODEL_TOOLS: LazyLock<Vec<OpenAiTool>> =
pub(crate) fn create_tools_json_for_responses_api(
prompt: &Prompt,
model: &str,
include_plan_tool: bool,
) -> crate::error::Result<Vec<serde_json::Value>> {
// Assemble tool list: built-in tools + any extra tools from the prompt.
let default_tools = if model.starts_with("codex") {
@@ -95,10 +93,6 @@ pub(crate) fn create_tools_json_for_responses_api(
.map(|(name, tool)| mcp_tool_to_openai_tool(name, tool)),
);
if include_plan_tool {
tools_json.push(serde_json::to_value(PLAN_TOOL.clone())?);
}
Ok(tools_json)
}
@@ -108,12 +102,10 @@ pub(crate) fn create_tools_json_for_responses_api(
pub(crate) fn create_tools_json_for_chat_completions_api(
prompt: &Prompt,
model: &str,
include_plan_tool: bool,
) -> crate::error::Result<Vec<serde_json::Value>> {
// We start with the JSON for the Responses API and then rewrite it to match
// the chat completions tool call format.
let responses_api_tools_json =
create_tools_json_for_responses_api(prompt, model, include_plan_tool)?;
let responses_api_tools_json = create_tools_json_for_responses_api(prompt, model)?;
let tools_json = responses_api_tools_json
.into_iter()
.filter_map(|mut tool| {

View File

@@ -1,126 +0,0 @@
use std::collections::BTreeMap;
use std::sync::LazyLock;
use serde::Deserialize;
use serde::Serialize;
use crate::codex::Session;
use crate::models::FunctionCallOutputPayload;
use crate::models::ResponseInputItem;
use crate::openai_tools::JsonSchema;
use crate::openai_tools::OpenAiTool;
use crate::openai_tools::ResponsesApiTool;
use crate::protocol::Event;
use crate::protocol::EventMsg;
// Types for the TODO tool arguments matching codex-vscode/todo-mcp/src/main.rs
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StepStatus {
Pending,
InProgress,
Completed,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct PlanItemArg {
pub step: String,
pub status: StepStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct UpdatePlanArgs {
#[serde(default)]
pub explanation: Option<String>,
pub plan: Vec<PlanItemArg>,
}
pub(crate) static PLAN_TOOL: LazyLock<OpenAiTool> = LazyLock::new(|| {
let mut plan_item_props = BTreeMap::new();
plan_item_props.insert("step".to_string(), JsonSchema::String);
plan_item_props.insert("status".to_string(), JsonSchema::String);
let plan_items_schema = JsonSchema::Array {
items: Box::new(JsonSchema::Object {
properties: plan_item_props,
required: &["step", "status"],
additional_properties: false,
}),
};
let mut properties = BTreeMap::new();
properties.insert("explanation".to_string(), JsonSchema::String);
properties.insert("plan".to_string(), plan_items_schema);
OpenAiTool::Function(ResponsesApiTool {
name: "update_plan",
description: r#"Use the update_plan tool to keep the user updated on the current plan for the task.
After understanding the user's task, call the update_plan tool with an initial plan. An example of a plan:
1. Explore the codebase to find relevant files (status: in_progress)
2. Implement the feature in the XYZ component (status: pending)
3. Commit changes and make a pull request (status: pending)
Each step should be a short, 1-sentence description.
Until all the steps are finished, there should always be exactly one in_progress step in the plan.
Call the update_plan tool whenever you finish a step, marking the completed step as `completed` and marking the next step as `in_progress`.
Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step.
Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
When all steps are completed, call update_plan one last time with all steps marked as `completed`."#,
strict: false,
parameters: JsonSchema::Object {
properties,
required: &["plan"],
additional_properties: false,
},
})
});
/// This function doesn't do anything useful. However, it gives the model a structured way to record its plan that clients can read and render.
/// So it's the _inputs_ to this function that are useful to clients, not the outputs and neither are actually useful for the model other
/// than forcing it to come up and document a plan (TBD how that affects performance).
pub(crate) async fn handle_update_plan(
session: &Session,
arguments: String,
sub_id: String,
call_id: String,
) -> ResponseInputItem {
match parse_update_plan_arguments(arguments, &call_id) {
Ok(args) => {
let output = ResponseInputItem::FunctionCallOutput {
call_id,
output: FunctionCallOutputPayload {
content: "Plan updated".to_string(),
success: Some(true),
},
};
session
.send_event(Event {
id: sub_id.to_string(),
msg: EventMsg::PlanUpdate(args),
})
.await;
output
}
Err(output) => *output,
}
}
fn parse_update_plan_arguments(
arguments: String,
call_id: &str,
) -> Result<UpdatePlanArgs, Box<ResponseInputItem>> {
match serde_json::from_str::<UpdatePlanArgs>(&arguments) {
Ok(args) => Ok(args),
Err(e) => {
let output = ResponseInputItem::FunctionCallOutput {
call_id: call_id.to_string(),
output: FunctionCallOutputPayload {
content: format!("failed to parse function arguments: {e}"),
success: None,
},
};
Err(Box::new(output))
}
}
}
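
For reference, the arguments the removed handler parsed deserialize as below (struct shapes copied from the deleted file above; `serde` and `serde_json` are the only dependencies, and the JSON status string reflects the `snake_case` rename):

```rust
// Sketch: deserializing UpdatePlanArgs the way the removed handler did.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum StepStatus { Pending, InProgress, Completed }

#[derive(Debug, Deserialize)]
struct PlanItemArg { step: String, status: StepStatus }

#[derive(Debug, Deserialize)]
struct UpdatePlanArgs {
    #[serde(default)]
    explanation: Option<String>,
    plan: Vec<PlanItemArg>,
}

fn main() {
    let arguments = r#"{"plan":[{"step":"Explore the codebase","status":"in_progress"}]}"#;
    let args: UpdatePlanArgs = serde_json::from_str(arguments).unwrap();
    assert!(args.explanation.is_none());
    assert!(matches!(args.plan[0].status, StepStatus::InProgress));
}
```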

View File

@@ -7,8 +7,7 @@ use std::collections::HashMap;
use std::fmt;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use std::str::FromStr; // Added for FinalOutput Display implementation
use mcp_types::CallToolResult;
use serde::Deserialize;
@@ -20,7 +19,6 @@ use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::message_history::HistoryEntry;
use crate::model_provider_info::ModelProviderInfo;
use crate::plan_tool::UpdatePlanArgs;
/// Submission Queue Entry - requests from user
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -121,10 +119,6 @@ pub enum Op {
/// Request a single history entry identified by `log_id` + `offset`.
GetHistoryEntryRequest { offset: usize, log_id: u64 },
/// Request the agent to summarize the current conversation context.
/// The agent will use its existing context (either conversation history or previous response id)
/// to generate a summary which will be returned as an AgentMessage event.
SummarizeContext,
/// Request to shut down codex instance.
Shutdown,
}
@@ -341,8 +335,6 @@ pub enum EventMsg {
/// Response to GetHistoryEntryRequest.
GetHistoryEntryResponse(GetHistoryEntryResponseEvent),
PlanUpdate(UpdatePlanArgs),
/// Notification that the agent is shutting down.
ShutdownComplete,
}
@@ -419,7 +411,9 @@ pub struct AgentReasoningDeltaEvent {
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpInvocation {
pub struct McpToolCallBeginEvent {
/// Identifier so this can be paired with the McpToolCallEnd event.
pub call_id: String,
/// Name of the MCP server as defined in the config.
pub server: String,
/// Name of the tool as given by the MCP server.
@@ -428,19 +422,10 @@ pub struct McpInvocation {
pub arguments: Option<serde_json::Value>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpToolCallBeginEvent {
/// Identifier so this can be paired with the McpToolCallEnd event.
pub call_id: String,
pub invocation: McpInvocation,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpToolCallEndEvent {
/// Identifier for the corresponding McpToolCallBegin that finished.
pub call_id: String,
pub invocation: McpInvocation,
pub duration: Duration,
/// Result of the tool call. Note this could be an error.
pub result: Result<CallToolResult, String>,
}
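
With the `McpInvocation` wrapper removed, the begin event flattens back into a single struct. A sketch of constructing one (types mirror the protocol definitions above; the server and tool names are made up for illustration):

```rust
// Sketch: the flattened McpToolCallBeginEvent after this change.
use serde_json::json;

struct McpToolCallBeginEvent {
    call_id: String,
    server: String,
    tool: String,
    arguments: Option<serde_json::Value>,
}

fn main() {
    let ev = McpToolCallBeginEvent {
        call_id: "call-1".into(),
        server: "example-server".into(), // illustrative names
        tool: "example-tool".into(),
        arguments: Some(json!({"query": "hello"})),
    };
    assert_eq!(ev.call_id, "call-1");
}
```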

View File

@@ -20,13 +20,8 @@ impl Shell {
return None;
}
let mut result = vec![zsh.shell_path.clone()];
result.push("-lc".to_string());
let joined = strip_bash_lc(&command)
.or_else(|| shlex::try_join(command.iter().map(|s| s.as_str())).ok());
if let Some(joined) = joined {
let mut result = vec![zsh.shell_path.clone(), "-c".to_string()];
if let Ok(joined) = shlex::try_join(command.iter().map(|s| s.as_str())) {
result.push(format!("source {} && ({joined})", zsh.zshrc_path));
} else {
return None;
@@ -38,19 +33,6 @@ impl Shell {
}
}
fn strip_bash_lc(command: &Vec<String>) -> Option<String> {
match command.as_slice() {
// exactly three items
[first, second, third]
// first two must be "bash", "-lc"
if first == "bash" && second == "-lc" =>
{
Some(third.clone())
}
_ => None,
}
}
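
The removed helper matched only the exact three-element `["bash", "-lc", script]` form; a quick sketch of that behavior:

```rust
// Sketch of the removed strip_bash_lc helper's matching behavior.
fn strip_bash_lc(command: &[String]) -> Option<String> {
    match command {
        [first, second, third] if first == "bash" && second == "-lc" => Some(third.clone()),
        _ => None,
    }
}

fn main() {
    let cmd: Vec<String> = ["bash", "-lc", "echo hi"].map(String::from).to_vec();
    assert_eq!(strip_bash_lc(&cmd), Some("echo hi".to_string()));

    let other: Vec<String> = ["zsh", "-c", "echo hi"].map(String::from).to_vec();
    assert_eq!(strip_bash_lc(&other), None);
}
```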
#[cfg(target_os = "macos")]
pub async fn default_user_shell() -> Shell {
use tokio::process::Command;
@@ -137,29 +119,15 @@ mod tests {
let cases = vec![
(
vec!["myecho"],
vec![shell_path, "-lc", "source ZSHRC_PATH && (myecho)"],
vec![shell_path, "-c", "source ZSHRC_PATH && (myecho)"],
Some("It works!\n"),
),
(
vec!["myecho"],
vec![shell_path, "-lc", "source ZSHRC_PATH && (myecho)"],
Some("It works!\n"),
),
(
vec!["bash", "-c", "echo 'single' \"double\""],
vec![
shell_path,
"-lc",
"source ZSHRC_PATH && (bash -c \"echo 'single' \\\"double\\\"\")",
],
Some("single double\n"),
),
(
vec!["bash", "-lc", "echo 'single' \"double\""],
vec![
shell_path,
"-lc",
"source ZSHRC_PATH && (echo 'single' \"double\")",
"-c",
"source ZSHRC_PATH && (bash -lc \"echo 'single' \\\"double\\\"\")",
],
Some("single double\n"),
),

View File

@@ -64,3 +64,21 @@ pub fn is_inside_git_repo(config: &Config) -> bool {
false
}
/// If `val` is a path to a readable file, return its trimmed contents.
///
/// - When `val` points to a file, this reads the file, trims leading/trailing
/// whitespace and returns `Ok(Some(contents))` unless the trimmed contents are
/// empty in which case it returns `Ok(None)`.
/// - When `val` is not a file path, return `Ok(Some(val.to_string()))` so
/// callers can treat the value as a literal string.
pub fn maybe_read_file(val: &str) -> std::io::Result<Option<String>> {
let p = std::path::Path::new(val);
if p.is_file() {
let s = std::fs::read_to_string(p)?;
let s = s.trim().to_string();
if s.is_empty() { Ok(None) } else { Ok(Some(s)) }
} else {
Ok(Some(val.to_string()))
}
}
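
A usage sketch of the new helper, with the function copied verbatim so the example stands alone:

```rust
// Sketch: maybe_read_file treats file paths and literal strings uniformly.
fn maybe_read_file(val: &str) -> std::io::Result<Option<String>> {
    let p = std::path::Path::new(val);
    if p.is_file() {
        let s = std::fs::read_to_string(p)?;
        let s = s.trim().to_string();
        if s.is_empty() { Ok(None) } else { Ok(Some(s)) }
    } else {
        Ok(Some(val.to_string()))
    }
}

fn main() -> std::io::Result<()> {
    // Not a file on disk: the value passes through as a literal.
    assert_eq!(maybe_read_file("hello")?, Some("hello".to_string()));

    // A real file: contents are read and trimmed; empty files yield None.
    let path = std::env::temp_dir().join("maybe_read_file_demo.txt");
    std::fs::write(&path, "  trimmed contents\n")?;
    assert_eq!(
        maybe_read_file(path.to_str().unwrap())?,
        Some("trimmed contents".to_string())
    );
    std::fs::remove_file(&path)?;
    Ok(())
}
```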

View File

@@ -81,96 +81,6 @@ async fn chat_mode_stream_cli() {
server.verify().await;
}
/// Verify that passing `-c experimental_instructions_file=...` to the CLI
/// overrides the built-in base instructions by inspecting the request body
/// received by a mock OpenAI Responses endpoint.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn exec_cli_applies_experimental_instructions_file() {
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Start mock server which will capture the request and return a minimal
// SSE stream for a single turn.
let server = MockServer::start().await;
let sse = concat!(
"data: {\"type\":\"response.created\",\"response\":{}}\n\n",
"data: {\"type\":\"response.completed\",\"response\":{\"id\":\"r1\"}}\n\n"
);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse, "text/event-stream"),
)
.expect(1)
.mount(&server)
.await;
// Create a temporary instructions file with a unique marker we can assert
// appears in the outbound request payload.
let custom = TempDir::new().unwrap();
let marker = "cli-experimental-instructions-marker";
let custom_path = custom.path().join("instr.md");
std::fs::write(&custom_path, marker).unwrap();
let custom_path_str = custom_path.to_string_lossy().replace('\\', "/");
// Build a provider override that points at the mock server and instructs
// Codex to use the Responses API with the dummy env var.
let provider_override = format!(
"model_providers.mock={{ name = \"mock\", base_url = \"{}/v1\", env_key = \"PATH\", wire_api = \"responses\" }}",
server.uri()
);
let home = TempDir::new().unwrap();
let mut cmd = AssertCommand::new("cargo");
cmd.arg("run")
.arg("-p")
.arg("codex-cli")
.arg("--quiet")
.arg("--")
.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg(&provider_override)
.arg("-c")
.arg("model_provider=\"mock\"")
.arg("-c")
.arg(format!(
"experimental_instructions_file=\"{custom_path_str}\""
))
.arg("-C")
.arg(env!("CARGO_MANIFEST_DIR"))
.arg("hello?\n");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
let output = cmd.output().unwrap();
println!("Status: {}", output.status);
println!("Stdout:\n{}", String::from_utf8_lossy(&output.stdout));
println!("Stderr:\n{}", String::from_utf8_lossy(&output.stderr));
assert!(output.status.success());
// Inspect the captured request and verify our custom base instructions were
// included in the `instructions` field.
let request = &server.received_requests().await.unwrap()[0];
let body = request.body_json::<serde_json::Value>().unwrap();
let instructions = body
.get("instructions")
.and_then(|v| v.as_str())
.unwrap_or_default()
.to_string();
assert!(
instructions.contains(marker),
"instructions did not contain custom marker; got: {instructions}"
);
}
/// Tests streaming responses through the CLI using a local SSE fixture file.
/// This test:
/// 1. Uses a pre-recorded SSE response fixture instead of a live server

View File

@@ -1,19 +1,11 @@
use std::path::PathBuf;
use chrono::Utc;
use codex_core::Codex;
use codex_core::CodexSpawnOk;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::exec::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::SessionConfiguredEvent;
use codex_login::AuthDotJson;
use codex_login::AuthMode;
use codex_login::CodexAuth;
use codex_login::TokenData;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture_with_id;
use core_test_support::wait_for_event;
@@ -56,23 +48,32 @@ async fn includes_session_id_and_model_headers_in_request() {
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
name: "openai".into(),
base_url: format!("{}/v1", server.uri()),
// Environment variable that should exist in the test environment.
// ModelClient will return an error if the environment variable for the
// provider is not set.
env_key: Some("PATH".into()),
env_key_instructions: None,
wire_api: codex_core::WireApi::Responses,
query_params: None,
http_headers: Some(
[("originator".to_string(), "codex_cli_rs".to_string())]
.into_iter()
.collect(),
),
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: None,
};
// Init session
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key".to_string())),
ctrl_c.clone(),
)
.await
.unwrap();
let CodexSpawnOk { codex, .. } = Codex::spawn(config, ctrl_c.clone()).await.unwrap();
codex
.submit(Op::UserInput {
@@ -94,20 +95,15 @@ async fn includes_session_id_and_model_headers_in_request() {
// get request from the server
let request = &server.received_requests().await.unwrap()[0];
let request_session_id = request.headers.get("session_id").unwrap();
let request_originator = request.headers.get("originator").unwrap();
let request_authorization = request.headers.get("authorization").unwrap();
let request_body = request.headers.get("session_id").unwrap();
let originator = request.headers.get("originator").unwrap();
assert!(current_session_id.is_some());
assert_eq!(
request_session_id.to_str().unwrap(),
request_body.to_str().unwrap(),
current_session_id.as_ref().unwrap()
);
assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs");
assert_eq!(
request_authorization.to_str().unwrap(),
"Bearer Test API Key"
);
assert_eq!(originator.to_str().unwrap(), "codex_cli_rs");
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -130,9 +126,22 @@ async fn includes_base_instructions_override_in_request() {
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
name: "openai".into(),
base_url: format!("{}/v1", server.uri()),
// Environment variable that should exist in the test environment.
// ModelClient will return an error if the environment variable for the
// provider is not set.
env_key: Some("PATH".into()),
env_key_instructions: None,
wire_api: codex_core::WireApi::Responses,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: None,
};
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
@@ -140,13 +149,7 @@ async fn includes_base_instructions_override_in_request() {
config.model_provider = model_provider;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key".to_string())),
ctrl_c.clone(),
)
.await
.unwrap();
let CodexSpawnOk { codex, .. } = Codex::spawn(config, ctrl_c.clone()).await.unwrap();
codex
.submit(Op::UserInput {
@@ -169,172 +172,3 @@ async fn includes_base_instructions_override_in_request() {
.contains("test instructions")
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn chatgpt_auth_sends_correct_request() {
#![allow(clippy::unwrap_used)]
if std::env::var(CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
println!(
"Skipping test because it cannot execute when network is disabled in a Codex sandbox."
);
return;
}
// Mock server
let server = MockServer::start().await;
// First request must NOT include `previous_response_id`.
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
Mock::given(method("POST"))
.and(path("/api/codex/responses"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/api/codex", server.uri())),
..built_in_model_providers()["openai"].clone()
};
// Init session
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(auth_from_token("Access Token".to_string())),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
let EventMsg::SessionConfigured(SessionConfiguredEvent { session_id, .. }) =
wait_for_event(&codex, |ev| matches!(ev, EventMsg::SessionConfigured(_))).await
else {
unreachable!()
};
let current_session_id = Some(session_id.to_string());
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
// get request from the server
let request = &server.received_requests().await.unwrap()[0];
let request_session_id = request.headers.get("session_id").unwrap();
let request_originator = request.headers.get("originator").unwrap();
let request_authorization = request.headers.get("authorization").unwrap();
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(current_session_id.is_some());
assert_eq!(
request_session_id.to_str().unwrap(),
current_session_id.as_ref().unwrap()
);
assert_eq!(request_originator.to_str().unwrap(), "codex_cli_rs");
assert_eq!(
request_authorization.to_str().unwrap(),
"Bearer Access Token"
);
assert!(!request_body["store"].as_bool().unwrap());
assert!(request_body["stream"].as_bool().unwrap());
assert_eq!(
request_body["include"][0].as_str().unwrap(),
"reasoning.encrypted_content"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn includes_user_instructions_message_in_request() {
#![allow(clippy::unwrap_used)]
let server = MockServer::start().await;
let first = ResponseTemplate::new(200)
.insert_header("content-type", "text/event-stream")
.set_body_raw(sse_completed("resp1"), "text/event-stream");
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(first)
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
config.user_instructions = Some("be nice".to_string());
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key".to_string())),
ctrl_c.clone(),
)
.await
.unwrap();
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: "hello".into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let request = &server.received_requests().await.unwrap()[0];
let request_body = request.body_json::<serde_json::Value>().unwrap();
assert!(
!request_body["instructions"]
.as_str()
.unwrap()
.contains("be nice")
);
assert_eq!(request_body["input"][0]["role"], "user");
assert!(
request_body["input"][0]["content"][0]["text"]
.as_str()
.unwrap()
.starts_with("be nice")
);
}
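
Both tests above stub the Responses API with `sse_completed`, a fixture helper that is not part of this diff. A minimal sketch of what such a helper could look like (hypothetical; the real one lives in `core_test_support`):

```
// Hypothetical sketch of the SSE fixture used above: a single
// `response.completed` server-sent event that terminates the stream.
fn sse_completed(id: &str) -> String {
    format!(
        "event: response.completed\ndata: {{\"type\":\"response.completed\",\"response\":{{\"id\":\"{id}\"}}}}\n\n"
    )
}
```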
fn auth_from_token(id_token: String) -> CodexAuth {
CodexAuth::new(
None,
AuthMode::ChatGPT,
PathBuf::new(),
Some(AuthDotJson {
tokens: TokenData {
id_token,
access_token: "Access Token".to_string(),
refresh_token: "test".to_string(),
account_id: None,
},
last_refresh: Utc::now(),
openai_api_key: None,
}),
)
}

View File

@@ -50,7 +50,7 @@ async fn spawn_codex() -> Result<Codex, CodexErr> {
config.model_provider.request_max_retries = Some(2);
config.model_provider.stream_max_retries = Some(2);
let CodexSpawnOk { codex: agent, .. } =
Codex::spawn(config, None, std::sync::Arc::new(Notify::new())).await?;
Codex::spawn(config, std::sync::Arc::new(Notify::new())).await?;
Ok(agent)
}

View File

@@ -10,7 +10,6 @@ use codex_core::exec::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_login::CodexAuth;
use core_test_support::load_default_config_for_test;
use core_test_support::load_sse_fixture;
use core_test_support::load_sse_fixture_with_id;
@@ -76,7 +75,7 @@ async fn retries_on_early_close() {
let model_provider = ModelProviderInfo {
name: "openai".into(),
base_url: Some(format!("{}/v1", server.uri())),
base_url: format!("{}/v1", server.uri()),
// Environment variable that should exist in the test environment.
// ModelClient will return an error if the environment variable for the
// provider is not set.
@@ -90,20 +89,13 @@ async fn retries_on_early_close() {
request_max_retries: Some(0),
stream_max_retries: Some(1),
stream_idle_timeout_ms: Some(2000),
requires_auth: false,
};
let ctrl_c = std::sync::Arc::new(tokio::sync::Notify::new());
let codex_home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&codex_home);
config.model_provider = model_provider;
let CodexSpawnOk { codex, .. } = Codex::spawn(
config,
Some(CodexAuth::from_api_key("Test API Key".to_string())),
ctrl_c,
)
.await
.unwrap();
let CodexSpawnOk { codex, .. } = Codex::spawn(config, ctrl_c).await.unwrap();
codex
.submit(Op::UserInput {

View File

@@ -1,41 +0,0 @@
#![expect(clippy::unwrap_used, clippy::expect_used)]
//! Tests for the `Op::SummarizeContext` operation, verifying that
//! summarization requests are handled and injected as user input.
use std::time::Duration;
use codex_core::Codex;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use core_test_support::load_default_config_for_test;
use tempfile::TempDir;
use tokio::time::timeout;
/// Helper function to set up a codex session and wait for it to be configured
async fn setup_configured_codex_session() -> Codex {
let codex_home = TempDir::new().unwrap();
let config = load_default_config_for_test(&codex_home);
let codex_conversation = codex_core::codex_wrapper::init_codex(config).await.unwrap();
codex_conversation.codex
}
#[tokio::test]
async fn test_summarize_context_spawns_new_agent_task() {
// Test the specific behavior: when there's no current task,
// SummarizeContext should spawn a new AgentTask with the summarization prompt
let codex = setup_configured_codex_session().await;
// At this point, there should be no current task running
let _sub_id = codex.submit(Op::SummarizeContext).await.unwrap();
let event = timeout(Duration::from_secs(5), codex.next_event())
.await
.expect("timeout waiting for task started event")
.expect("codex closed");
assert!(
matches!(event.msg, EventMsg::TaskStarted),
"Expected TaskStarted when no current task exists - should spawn new AgentTask"
);
}

View File

@@ -63,6 +63,40 @@ pub struct Cli {
/// if `-` is used), instructions are read from stdin.
#[arg(value_name = "PROMPT")]
pub prompt: Option<String>,
/// Override the built-in system prompt (base instructions).
///
/// If the value looks like a path to an existing file, the contents of the
/// file are used. Otherwise, the value itself is used verbatim as the
/// instructions string.
#[arg(long = "experimental-instructions")]
pub experimental_instructions: Option<String>,
}
#[cfg(test)]
mod tests {
use super::Cli;
use clap::CommandFactory;
#[test]
fn help_includes_file_behavior_for_experimental_instructions() {
let mut cmd = Cli::command();
let mut buf: Vec<u8> = Vec::new();
assert!(cmd.write_long_help(&mut buf).is_ok(), "help should render");
let help = match String::from_utf8(buf) {
Ok(s) => s,
Err(e) => panic!("invalid utf8: {e}"),
};
assert!(help.contains("Override the built-in system prompt (base instructions)."));
assert!(help.contains(
"If the value looks like a path to an existing file, the contents of the file are used."
));
assert!(
help.contains(
"Otherwise, the value itself is used verbatim as the instructions string."
)
);
}
}
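
The file-or-literal behavior documented on the flag boils down to a small resolver. A minimal sketch of that documented behavior (not the actual implementation, which goes through `maybe_read_file`):

```
// Sketch of the documented resolution: an existing file path yields its
// contents, anything else is used verbatim as the instructions string.
fn resolve_experimental_instructions(value: &str) -> std::io::Result<String> {
    let path = std::path::Path::new(value);
    if path.is_file() {
        std::fs::read_to_string(path)
    } else {
        Ok(value.to_string())
    }
}
```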
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, ValueEnum)]

View File

@@ -1,4 +1,5 @@
use std::path::Path;
use std::path::PathBuf;
use codex_common::summarize_sandbox_policy;
use codex_core::WireApi;
@@ -20,7 +21,16 @@ pub(crate) trait EventProcessor {
fn process_event(&mut self, event: Event) -> CodexStatus;
}
pub(crate) fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, String)> {
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum PromptOrigin {
File(PathBuf),
Literal,
}
pub(crate) fn create_config_summary_entries(
config: &Config,
prompt_origin: Option<&PromptOrigin>,
) -> Vec<(&'static str, String)> {
let mut entries = vec![
("workdir", config.cwd.display().to_string()),
("model", config.model.clone()),
@@ -28,6 +38,16 @@ pub(crate) fn create_config_summary_entries(config: &Config) -> Vec<(&'static st
("approval", config.approval_policy.to_string()),
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
];
if let Some(origin) = prompt_origin {
let prompt_val = match origin {
PromptOrigin::Literal => "experimental".to_string(),
PromptOrigin::File(path) => path
.file_name()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| path.display().to_string()),
};
entries.push(("prompt_origin", prompt_val));
}
if config.model_provider.wire_api == WireApi::Responses
&& model_supports_reasoning_summaries(config)
{
@@ -68,3 +88,64 @@ fn write_last_message_file(contents: &str, last_message_path: Option<&Path>) {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use std::collections::HashMap;
use tempfile::TempDir;
fn minimal_config() -> Config {
let cwd = match TempDir::new() {
Ok(t) => t,
Err(e) => panic!("tempdir error: {e}"),
};
let codex_home = match TempDir::new() {
Ok(t) => t,
Err(e) => panic!("tempdir error: {e}"),
};
let cfg = ConfigToml {
..Default::default()
};
let overrides = ConfigOverrides {
cwd: Some(cwd.path().to_path_buf()),
..Default::default()
};
match Config::load_from_base_config_with_overrides(
cfg,
overrides,
codex_home.path().to_path_buf(),
) {
Ok(c) => c,
Err(e) => panic!("config error: {e}"),
}
}
#[test]
fn entries_include_prompt_origin_experimental_for_literal_origin() {
let mut cfg = minimal_config();
cfg.base_instructions = Some("hello".to_string());
let entries = create_config_summary_entries(&cfg, Some(&PromptOrigin::Literal));
let map: HashMap<_, _> = entries.into_iter().collect();
assert_eq!(
map.get("prompt_origin").cloned(),
Some("experimental".to_string())
);
}
#[test]
fn entries_include_prompt_origin_filename_for_file_origin() {
let mut cfg = minimal_config();
cfg.base_instructions = Some("hello".to_string());
let path = PathBuf::from("/tmp/custom_instructions.txt");
let entries = create_config_summary_entries(&cfg, Some(&PromptOrigin::File(path.clone())));
let map: HashMap<_, _> = entries.into_iter().collect();
assert_eq!(
map.get("prompt_origin").cloned(),
Some("custom_instructions.txt".to_string())
);
}
}
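
For reference, the human-output processor renders these entries one per line, so a literal origin surfaces in the startup banner as `prompt_origin: experimental`. An illustrative wiring (assumes a `Config` built elsewhere, e.g. via the `minimal_config()` helper above):

```
// Illustrative only: print the config summary the way the human-output
// processor does, one `key: value` line per entry.
fn print_summary(config: &Config, origin: Option<&PromptOrigin>) {
    for (key, value) in create_config_summary_entries(config, origin) {
        println!("{key}: {value}");
    }
}
```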

View File

@@ -1,7 +1,5 @@
use codex_common::elapsed::format_duration;
use codex_common::elapsed::format_elapsed;
use codex_core::config::Config;
use codex_core::plan_tool::UpdatePlanArgs;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
use codex_core::protocol::AgentReasoningDeltaEvent;
@@ -12,7 +10,6 @@ use codex_core::protocol::EventMsg;
use codex_core::protocol::ExecCommandBeginEvent;
use codex_core::protocol::ExecCommandEndEvent;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::McpToolCallBeginEvent;
use codex_core::protocol::McpToolCallEndEvent;
use codex_core::protocol::PatchApplyBeginEvent;
@@ -30,6 +27,7 @@ use std::time::Instant;
use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use crate::event_processor::PromptOrigin;
use crate::event_processor::create_config_summary_entries;
use crate::event_processor::handle_last_message;
@@ -40,6 +38,11 @@ pub(crate) struct EventProcessorWithHumanOutput {
call_id_to_command: HashMap<String, ExecCommandBegin>,
call_id_to_patch: HashMap<String, PatchApplyBegin>,
/// Tracks in-flight MCP tool calls so we can calculate duration and print
/// a concise summary when the corresponding `McpToolCallEnd` event is
/// received.
call_id_to_tool_call: HashMap<String, McpToolCallBegin>,
// To ensure that --color=never is respected, ANSI escapes _must_ be added
// using .style() with one of these fields. If you need a new style, add a
// new field here.
@@ -57,6 +60,7 @@ pub(crate) struct EventProcessorWithHumanOutput {
answer_started: bool,
reasoning_started: bool,
last_message_path: Option<PathBuf>,
prompt_origin: Option<PromptOrigin>,
}
impl EventProcessorWithHumanOutput {
@@ -64,9 +68,11 @@ impl EventProcessorWithHumanOutput {
with_ansi: bool,
config: &Config,
last_message_path: Option<PathBuf>,
prompt_origin: Option<PromptOrigin>,
) -> Self {
let call_id_to_command = HashMap::new();
let call_id_to_patch = HashMap::new();
let call_id_to_tool_call = HashMap::new();
if with_ansi {
Self {
@@ -79,10 +85,12 @@ impl EventProcessorWithHumanOutput {
red: Style::new().red(),
green: Style::new().green(),
cyan: Style::new().cyan(),
call_id_to_tool_call,
show_agent_reasoning: !config.hide_agent_reasoning,
answer_started: false,
reasoning_started: false,
last_message_path,
prompt_origin,
}
} else {
Self {
@@ -95,10 +103,12 @@ impl EventProcessorWithHumanOutput {
red: Style::new(),
green: Style::new(),
cyan: Style::new(),
call_id_to_tool_call,
show_agent_reasoning: !config.hide_agent_reasoning,
answer_started: false,
reasoning_started: false,
last_message_path,
prompt_origin,
}
}
}
@@ -109,6 +119,14 @@ struct ExecCommandBegin {
start_time: Instant,
}
/// Metadata captured when an `McpToolCallBegin` event is received.
struct McpToolCallBegin {
/// Formatted invocation string, e.g. `server.tool({"city":"sf"})`.
invocation: String,
/// Timestamp when the call started so we can compute duration later.
start_time: Instant,
}
struct PatchApplyBegin {
start_time: Instant,
auto_approved: bool,
@@ -137,7 +155,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
VERSION
);
let entries = create_config_summary_entries(config);
let entries = create_config_summary_entries(config, self.prompt_origin.as_ref());
for (key, value) in entries {
println!("{} {}", format!("{key}:").style(self.bold), value);
@@ -278,33 +296,63 @@ impl EventProcessor for EventProcessorWithHumanOutput {
println!("{}", truncated_output.style(self.dimmed));
}
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: _,
invocation,
call_id,
server,
tool,
arguments,
}) => {
// Build fully-qualified tool name: server.tool
let fq_tool_name = format!("{server}.{tool}");
// Format arguments as compact JSON so they fit on one line.
let args_str = arguments
.as_ref()
.map(|v: &serde_json::Value| {
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
})
.unwrap_or_default();
let invocation = if args_str.is_empty() {
format!("{fq_tool_name}()")
} else {
format!("{fq_tool_name}({args_str})")
};
self.call_id_to_tool_call.insert(
call_id.clone(),
McpToolCallBegin {
invocation: invocation.clone(),
start_time: Instant::now(),
},
);
ts_println!(
self,
"{} {}",
"tool".style(self.magenta),
format_mcp_invocation(&invocation).style(self.bold),
invocation.style(self.bold),
);
}
EventMsg::McpToolCallEnd(tool_call_end_event) => {
let is_success = tool_call_end_event.is_success();
let McpToolCallEndEvent {
call_id: _,
result,
invocation,
duration,
} = tool_call_end_event;
let McpToolCallEndEvent { call_id, result } = tool_call_end_event;
// Retrieve start time and invocation for duration calculation and labeling.
let info = self.call_id_to_tool_call.remove(&call_id);
let duration = format!(" in {}", format_duration(duration));
let (duration, invocation) = if let Some(McpToolCallBegin {
invocation,
start_time,
..
}) = info
{
(format!(" in {}", format_elapsed(start_time)), invocation)
} else {
(String::new(), format!("tool('{call_id}')"))
};
let status_str = if is_success { "success" } else { "failed" };
let title_style = if is_success { self.green } else { self.red };
let title = format!(
"{} {status_str}{duration}:",
format_mcp_invocation(&invocation)
);
let title = format!("{invocation} {status_str}{duration}:");
ts_println!(self, "{}", title.style(title_style));
@@ -470,11 +518,6 @@ impl EventProcessor for EventProcessorWithHumanOutput {
ts_println!(self, "model: {}", model);
println!();
}
EventMsg::PlanUpdate(plan_update_event) => {
let UpdatePlanArgs { explanation, plan } = plan_update_event;
ts_println!(self, "explanation: {explanation:?}");
ts_println!(self, "plan: {plan:?}");
}
EventMsg::GetHistoryEntryResponse(_) => {
// Currently ignored in exec output.
}
@@ -500,21 +543,3 @@ fn format_file_change(change: &FileChange) -> &'static str {
} => "M",
}
}
fn format_mcp_invocation(invocation: &McpInvocation) -> String {
// Build fully-qualified tool name: server.tool
let fq_tool_name = format!("{}.{}", invocation.server, invocation.tool);
// Format arguments as compact JSON so they fit on one line.
let args_str = invocation
.arguments
.as_ref()
.map(|v: &serde_json::Value| serde_json::to_string(v).unwrap_or_else(|_| v.to_string()))
.unwrap_or_default();
if args_str.is_empty() {
format!("{fq_tool_name}()")
} else {
format!("{fq_tool_name}({args_str})")
}
}
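
The begin/end bookkeeping this diff adopts for MCP tool calls is a simple map from `call_id` to invocation metadata. A standalone sketch of the pattern, assuming hypothetical names (`PendingCall`, `demo`):

```
// Stash the formatted invocation and a start time on Begin, then join them
// back up on End to print e.g. `server.tool({}) success in 1.2s`.
use std::collections::HashMap;
use std::time::Instant;

struct PendingCall {
    invocation: String,
    start_time: Instant,
}

fn demo() {
    let mut pending: HashMap<String, PendingCall> = HashMap::new();
    // Begin: remember the invocation keyed by call_id.
    pending.insert(
        "call-1".to_string(),
        PendingCall {
            invocation: "server.tool({})".to_string(),
            start_time: Instant::now(),
        },
    );
    // End: recover it (or fall back to the call_id) and report the duration.
    match pending.remove("call-1") {
        Some(PendingCall { invocation, start_time }) => {
            println!("{invocation} success in {:.1?}", start_time.elapsed())
        }
        None => println!("tool('call-1') success"),
    }
}
```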

View File

@@ -9,22 +9,27 @@ use serde_json::json;
use crate::event_processor::CodexStatus;
use crate::event_processor::EventProcessor;
use crate::event_processor::PromptOrigin;
use crate::event_processor::create_config_summary_entries;
use crate::event_processor::handle_last_message;
pub(crate) struct EventProcessorWithJsonOutput {
last_message_path: Option<PathBuf>,
prompt_origin: Option<PromptOrigin>,
}
impl EventProcessorWithJsonOutput {
pub fn new(last_message_path: Option<PathBuf>) -> Self {
Self { last_message_path }
pub fn new(last_message_path: Option<PathBuf>, prompt_origin: Option<PromptOrigin>) -> Self {
Self {
last_message_path,
prompt_origin,
}
}
}
impl EventProcessor for EventProcessorWithJsonOutput {
fn print_config_summary(&mut self, config: &Config, prompt: &str) {
let entries = create_config_summary_entries(config)
let entries = create_config_summary_entries(config, self.prompt_origin.as_ref())
.into_iter()
.map(|(key, value)| (key.to_string(), value))
.collect::<HashMap<String, String>>();

View File

@@ -8,6 +8,7 @@ use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use crate::event_processor::PromptOrigin;
pub use cli::Cli;
use codex_core::codex_wrapper::CodexConversation;
use codex_core::codex_wrapper::{self};
@@ -21,6 +22,7 @@ use codex_core::protocol::InputItem;
use codex_core::protocol::Op;
use codex_core::protocol::TaskCompleteEvent;
use codex_core::util::is_inside_git_repo;
use codex_core::util::maybe_read_file;
use event_processor_with_human_output::EventProcessorWithHumanOutput;
use event_processor_with_json_output::EventProcessorWithJsonOutput;
use tracing::debug;
@@ -45,9 +47,38 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
json: json_mode,
sandbox_mode: sandbox_mode_cli_arg,
prompt,
experimental_instructions,
config_overrides,
} = cli;
// Determine how to describe experimental instructions in the summary and
// prepare the effective base instructions. If the flag points at a file,
// read its contents; otherwise use the value verbatim.
let mut prompt_origin = match experimental_instructions.as_deref() {
Some(val) => {
let p = std::path::Path::new(val);
if p.is_file() {
Some(PromptOrigin::File(p.to_path_buf()))
} else {
Some(PromptOrigin::Literal)
}
}
None => None,
};
let experimental_instructions = match experimental_instructions {
Some(val) => match maybe_read_file(&val) {
Ok(Some(contents)) => Some(contents),
Ok(None) => None,
Err(e) => {
eprintln!("Failed to read --experimental-instructions file: {e}");
std::process::exit(1);
}
},
None => None,
};
let has_experimental = experimental_instructions.is_some();
// Determine the prompt based on CLI arg and/or stdin.
let prompt = match prompt {
Some(p) if p != "-" => p,
@@ -92,20 +123,6 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
),
};
// TODO(mbolin): Take a more thoughtful approach to logging.
let default_level = "error";
let _ = tracing_subscriber::fmt()
// Fallback to the `default_level` log filter if the environment
// variable is not set _or_ contains an invalid value
.with_env_filter(
EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new(default_level))
.unwrap_or_else(|_| EnvFilter::new(default_level)),
)
.with_ansi(stderr_with_ansi)
.with_writer(std::io::stderr)
.try_init();
let sandbox_mode = if full_auto {
Some(SandboxMode::WorkspaceWrite)
} else if dangerously_bypass_approvals_and_sandbox {
@@ -125,8 +142,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
cwd: cwd.map(|p| p.canonicalize().unwrap_or(p)),
model_provider: None,
codex_linux_sandbox_exe,
base_instructions: None,
include_plan_tool: None,
base_instructions: experimental_instructions,
};
// Parse `-c` overrides.
let cli_kv_overrides = match config_overrides.parse_overrides() {
@@ -138,13 +154,21 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
};
let config = Config::load_with_cli_overrides(cli_kv_overrides, overrides)?;
if !has_experimental {
prompt_origin = None;
}
let mut event_processor: Box<dyn EventProcessor> = if json_mode {
Box::new(EventProcessorWithJsonOutput::new(last_message_file.clone()))
Box::new(EventProcessorWithJsonOutput::new(
last_message_file.clone(),
prompt_origin.clone(),
))
} else {
Box::new(EventProcessorWithHumanOutput::create_with_ansi(
stdout_with_ansi,
&config,
last_message_file.clone(),
prompt_origin,
))
};
@@ -157,6 +181,20 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
std::process::exit(1);
}
// TODO(mbolin): Take a more thoughtful approach to logging.
let default_level = "error";
let _ = tracing_subscriber::fmt()
// Fallback to the `default_level` log filter if the environment
// variable is not set _or_ contains an invalid value
.with_env_filter(
EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new(default_level))
.unwrap_or_else(|_| EnvFilter::new(default_level)),
)
.with_ansi(stderr_with_ansi)
.with_writer(std::io::stderr)
.try_init();
let CodexConversation {
codex: codex_wrapper,
session_configured,
@@ -246,3 +284,53 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
Ok(())
}
#[cfg(test)]
mod tests {
use codex_core::util::maybe_read_file;
use std::fs;
use tempfile::NamedTempFile;
#[test]
fn maybe_read_file_returns_literal_for_non_path() {
let res = match maybe_read_file("You are a helpful assistant.") {
Ok(v) => v,
Err(e) => panic!("error: {e}"),
};
assert_eq!(res, Some("You are a helpful assistant.".to_string()));
}
#[test]
fn maybe_read_file_reads_and_trims_file_contents() {
let tf = match NamedTempFile::new() {
Ok(t) => t,
Err(e) => panic!("tempfile: {e}"),
};
if let Err(e) = fs::write(tf.path(), " Hello world\n") {
panic!("write temp file: {e}");
}
let path_s = tf.path().to_string_lossy().to_string();
let res = match maybe_read_file(&path_s) {
Ok(v) => v,
Err(e) => panic!("should read file successfully: {e}"),
};
assert_eq!(res, Some("Hello world".to_string()));
}
#[test]
fn maybe_read_file_empty_file_returns_none() {
let tf = match NamedTempFile::new() {
Ok(t) => t,
Err(e) => panic!("tempfile: {e}"),
};
if let Err(e) = fs::write(tf.path(), " \n\t ") {
panic!("write temp file: {e}");
}
let path_s = tf.path().to_string_lossy().to_string();
let res = match maybe_read_file(&path_s) {
Ok(v) => v,
Err(e) => panic!("should read file successfully: {e}"),
};
assert_eq!(res, None);
}
}
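
The real `maybe_read_file` lives in `codex_core::util` and is not part of this diff, but the tests above pin down its contract: an existing file is read and trimmed, a whitespace-only file yields `None`, and any non-path value is returned verbatim. A sketch consistent with that contract:

```
// Sketch of the behavior the tests above assert; not the actual
// implementation in codex_core::util.
fn maybe_read_file_sketch(value: &str) -> std::io::Result<Option<String>> {
    let path = std::path::Path::new(value);
    if !path.is_file() {
        return Ok(Some(value.to_string()));
    }
    let contents = std::fs::read_to_string(path)?;
    let trimmed = contents.trim();
    Ok(if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    })
}
```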

View File

@@ -1,152 +1,19 @@
use chrono::DateTime;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use std::env;
use std::fs::OpenOptions;
use std::io::Read;
use std::io::Write;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::process::Stdio;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use tokio::process::Command;
const SOURCE_FOR_PYTHON_SERVER: &str = include_str!("./login_with_chatgpt.py");
const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann";
const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY";
#[derive(Clone, Debug, PartialEq)]
pub enum AuthMode {
ApiKey,
ChatGPT,
}
#[derive(Debug, Clone)]
pub struct CodexAuth {
pub api_key: Option<String>,
pub mode: AuthMode,
auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>,
auth_file: PathBuf,
}
impl PartialEq for CodexAuth {
fn eq(&self, other: &Self) -> bool {
self.mode == other.mode
}
}
impl CodexAuth {
pub fn new(
api_key: Option<String>,
mode: AuthMode,
auth_file: PathBuf,
auth_dot_json: Option<AuthDotJson>,
) -> Self {
let auth_dot_json = Arc::new(Mutex::new(auth_dot_json));
Self {
api_key,
mode,
auth_file,
auth_dot_json,
}
}
pub fn from_api_key(api_key: String) -> Self {
Self {
api_key: Some(api_key),
mode: AuthMode::ApiKey,
auth_file: PathBuf::new(),
auth_dot_json: Arc::new(Mutex::new(None)),
}
}
pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> {
#[expect(clippy::unwrap_used)]
let auth_dot_json = self.auth_dot_json.lock().unwrap().clone();
match auth_dot_json {
Some(auth_dot_json) => {
if auth_dot_json.last_refresh < Utc::now() - chrono::Duration::days(28) {
let refresh_response = tokio::time::timeout(
Duration::from_secs(60),
try_refresh_token(auth_dot_json.tokens.refresh_token.clone()),
)
.await
.map_err(|_| {
std::io::Error::other("timed out while refreshing OpenAI API key")
})?
.map_err(std::io::Error::other)?;
let updated_auth_dot_json = update_tokens(
&self.auth_file,
refresh_response.id_token,
refresh_response.access_token,
refresh_response.refresh_token,
)
.await?;
#[expect(clippy::unwrap_used)]
let mut auth_dot_json = self.auth_dot_json.lock().unwrap();
*auth_dot_json = Some(updated_auth_dot_json);
}
Ok(auth_dot_json.tokens.clone())
}
None => Err(std::io::Error::other("Token data is not available.")),
}
}
pub async fn get_token(&self) -> Result<String, std::io::Error> {
match self.mode {
AuthMode::ApiKey => Ok(self.api_key.clone().unwrap_or_default()),
AuthMode::ChatGPT => {
let id_token = self.get_token_data().await?.access_token;
Ok(id_token)
}
}
}
}
// Loads the available auth information from the auth.json or OPENAI_API_KEY environment variable.
pub fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
let auth_file = codex_home.join("auth.json");
let auth_dot_json = try_read_auth_json(&auth_file).ok();
let auth_json_api_key = auth_dot_json
.as_ref()
.and_then(|a| a.openai_api_key.clone())
.filter(|s| !s.is_empty());
let openai_api_key = env::var(OPENAI_API_KEY_ENV_VAR)
.ok()
.filter(|s| !s.is_empty())
.or(auth_json_api_key);
if openai_api_key.is_none() && auth_dot_json.is_none() {
return Ok(None);
}
let mode = if openai_api_key.is_some() {
AuthMode::ApiKey
} else {
AuthMode::ChatGPT
};
Ok(Some(CodexAuth {
api_key: openai_api_key,
mode,
auth_file,
auth_dot_json: Arc::new(Mutex::new(auth_dot_json)),
}))
}
/// Run `python3 -c {{SOURCE_FOR_PYTHON_SERVER}}` with the CODEX_HOME
/// environment variable set to the provided `codex_home` path. If the
@@ -157,12 +24,14 @@ pub fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> {
/// If `capture_output` is true, the subprocess's output will be captured and
/// recorded in memory. Otherwise, the subprocess's output will be sent to the
/// current process's stdout/stderr.
pub async fn login_with_chatgpt(codex_home: &Path, capture_output: bool) -> std::io::Result<()> {
pub async fn login_with_chatgpt(
codex_home: &Path,
capture_output: bool,
) -> std::io::Result<String> {
let child = Command::new("python3")
.arg("-c")
.arg(SOURCE_FOR_PYTHON_SERVER)
.env("CODEX_HOME", codex_home)
.env("CODEX_CLIENT_ID", CLIENT_ID)
.stdin(Stdio::null())
.stdout(if capture_output {
Stdio::piped()
@@ -178,7 +47,7 @@ pub async fn login_with_chatgpt(codex_home: &Path, capture_output: bool) -> std:
let output = child.wait_with_output().await?;
if output.status.success() {
Ok(())
try_read_openai_api_key(codex_home).await
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
Err(std::io::Error::other(format!(
@@ -187,54 +56,61 @@ pub async fn login_with_chatgpt(codex_home: &Path, capture_output: bool) -> std:
}
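
A hedged usage sketch of the new signature, which now hands back the API key that the Python login server persisted to `auth.json` (the `demo_login` wrapper is hypothetical):

```
async fn demo_login(codex_home: &std::path::Path) -> std::io::Result<()> {
    // On success the login flow returns the OPENAI_API_KEY from auth.json.
    let api_key = login_with_chatgpt(codex_home, /* capture_output */ false).await?;
    println!("logged in; received a {}-byte API key", api_key.len());
    Ok(())
}
```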
}
/// Attempt to read the `OPENAI_API_KEY` from the `auth.json` file in the given
/// `CODEX_HOME` directory, refreshing it, if necessary.
pub async fn try_read_openai_api_key(codex_home: &Path) -> std::io::Result<String> {
let auth_dot_json = try_read_auth_json(codex_home).await?;
Ok(auth_dot_json.openai_api_key)
}
/// Attempt to read and refresh the `auth.json` file in the given `CODEX_HOME` directory.
/// Returns the full AuthDotJson structure after refreshing if necessary.
pub fn try_read_auth_json(auth_file: &Path) -> std::io::Result<AuthDotJson> {
let mut file = std::fs::File::open(auth_file)?;
pub async fn try_read_auth_json(codex_home: &Path) -> std::io::Result<AuthDotJson> {
let auth_path = codex_home.join("auth.json");
let mut file = std::fs::File::open(&auth_path)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?;
Ok(auth_dot_json)
if is_expired(&auth_dot_json) {
let refresh_response = try_refresh_token(&auth_dot_json).await?;
let mut auth_dot_json = auth_dot_json;
auth_dot_json.tokens.id_token = refresh_response.id_token;
if let Some(refresh_token) = refresh_response.refresh_token {
auth_dot_json.tokens.refresh_token = refresh_token;
}
auth_dot_json.last_refresh = Utc::now();
let mut options = OpenOptions::new();
options.truncate(true).write(true).create(true);
#[cfg(unix)]
{
options.mode(0o600);
}
let json_data = serde_json::to_string(&auth_dot_json)?;
{
let mut file = options.open(&auth_path)?;
file.write_all(json_data.as_bytes())?;
file.flush()?;
}
Ok(auth_dot_json)
} else {
Ok(auth_dot_json)
}
}
async fn update_tokens(
auth_file: &Path,
id_token: String,
access_token: Option<String>,
refresh_token: Option<String>,
) -> std::io::Result<AuthDotJson> {
let mut options = OpenOptions::new();
options.truncate(true).write(true).create(true);
#[cfg(unix)]
{
options.mode(0o600);
}
let mut auth_dot_json = try_read_auth_json(auth_file)?;
auth_dot_json.tokens.id_token = id_token.to_string();
if let Some(access_token) = access_token {
auth_dot_json.tokens.access_token = access_token.to_string();
}
if let Some(refresh_token) = refresh_token {
auth_dot_json.tokens.refresh_token = refresh_token.to_string();
}
auth_dot_json.last_refresh = Utc::now();
let json_data = serde_json::to_string_pretty(&auth_dot_json)?;
{
let mut file = options.open(auth_file)?;
file.write_all(json_data.as_bytes())?;
file.flush()?;
}
Ok(auth_dot_json)
fn is_expired(auth_dot_json: &AuthDotJson) -> bool {
let last_refresh = auth_dot_json.last_refresh;
last_refresh < Utc::now() - chrono::Duration::days(28)
}
async fn try_refresh_token(refresh_token: String) -> std::io::Result<RefreshResponse> {
async fn try_refresh_token(auth_dot_json: &AuthDotJson) -> std::io::Result<RefreshResponse> {
let refresh_request = RefreshRequest {
client_id: CLIENT_ID,
grant_type: "refresh_token",
refresh_token,
refresh_token: auth_dot_json.tokens.refresh_token.clone(),
scope: "openid profile email",
};
@@ -269,25 +145,24 @@ struct RefreshRequest {
scope: &'static str,
}
#[derive(Deserialize, Clone)]
#[derive(Deserialize)]
struct RefreshResponse {
id_token: String,
access_token: Option<String>,
refresh_token: Option<String>,
}
/// Expected structure for $CODEX_HOME/auth.json.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[derive(Deserialize, Serialize)]
pub struct AuthDotJson {
#[serde(rename = "OPENAI_API_KEY")]
pub openai_api_key: Option<String>,
pub openai_api_key: String,
pub tokens: TokenData,
pub last_refresh: DateTime<Utc>,
}
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[derive(Deserialize, Serialize, Clone)]
pub struct TokenData {
/// This is a JWT.
pub id_token: String,
@@ -297,5 +172,5 @@ pub struct TokenData {
pub refresh_token: String,
pub account_id: Option<String>,
pub account_id: String,
}
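
The `account_id` carried in `TokenData` originates from a claim inside the id_token JWT, which the Python login server below extracts during the code exchange. A sketch of the same extraction in Rust, assuming the `base64` 0.22 and `serde_json` crates (the helper name is hypothetical):

```
use base64::Engine as _;
use base64::engine::general_purpose::URL_SAFE_NO_PAD;

fn account_id_from_id_token(id_token: &str) -> Option<String> {
    // A JWT is `header.payload.signature`; the claims live in the payload.
    let payload_b64 = id_token.split('.').nth(1)?;
    let payload = URL_SAFE_NO_PAD.decode(payload_b64).ok()?;
    let claims: serde_json::Value = serde_json::from_slice(&payload).ok()?;
    claims["https://api.openai.com/auth"]["chatgpt_account_id"]
        .as_str()
        .map(str::to_string)
}
```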

View File

@@ -41,6 +41,7 @@ from typing import Any, Dict # for type hints
REQUIRED_PORT = 1455
URL_BASE = f"http://localhost:{REQUIRED_PORT}"
DEFAULT_ISSUER = "https://auth.openai.com"
DEFAULT_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
EXIT_CODE_WHEN_ADDRESS_ALREADY_IN_USE = 13
@@ -57,7 +58,7 @@ class TokenData:
class AuthBundle:
"""Aggregates authentication data produced after successful OAuth flow."""
api_key: str | None
api_key: str
token_data: TokenData
last_refresh: str
@@ -77,18 +78,12 @@ def main() -> None:
eprint("ERROR: CODEX_HOME environment variable is not set")
sys.exit(1)
client_id = os.getenv("CODEX_CLIENT_ID")
if not client_id:
eprint("ERROR: CODEX_CLIENT_ID environment variable is not set")
sys.exit(1)
# Spawn server.
try:
httpd = _ApiKeyHTTPServer(
("127.0.0.1", REQUIRED_PORT),
_ApiKeyHTTPHandler,
codex_home=codex_home,
client_id=client_id,
verbose=args.verbose,
)
except OSError as e:
@@ -162,7 +157,7 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
return
try:
auth_bundle, success_url = self._exchange_code(code)
auth_bundle, success_url = self._exchange_code_for_api_key(code)
except Exception as exc: # noqa: BLE001 propagate to client
self.send_error(500, f"Token exchange failed: {exc}")
return
@@ -216,22 +211,68 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
if getattr(self.server, "verbose", False): # type: ignore[attr-defined]
super().log_message(fmt, *args)
def _obtain_api_key(
self,
token_claims: Dict[str, Any],
access_claims: Dict[str, Any],
token_data: TokenData,
) -> tuple[str | None, str | None]:
"""Obtain an API key from the auth service.
def _exchange_code_for_api_key(self, code: str) -> tuple[AuthBundle, str]:
"""Perform token + token-exchange to obtain an OpenAI API key.
Returns (api_key, success_url) if successful, None otherwise.
Returns (AuthBundle, success_url).
"""
org_id = token_claims.get("organization_id")
project_id = token_claims.get("project_id")
token_endpoint = f"{self.server.issuer}/oauth/token"
if not org_id or not project_id:
return (None, None)
# 1. Authorization-code -> (id_token, access_token, refresh_token)
data = urllib.parse.urlencode(
{
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.server.redirect_uri,
"client_id": self.server.client_id,
"code_verifier": self.server.pkce.code_verifier,
}
).encode()
token_data: TokenData
with urllib.request.urlopen(
urllib.request.Request(
token_endpoint,
data=data,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
) as resp:
payload = json.loads(resp.read().decode())
# Extract chatgpt_account_id from id_token
id_token_parts = payload["id_token"].split(".")
if len(id_token_parts) != 3:
raise ValueError("Invalid ID token")
id_token_claims = _decode_jwt_segment(id_token_parts[1])
auth_claims = id_token_claims.get("https://api.openai.com/auth", {})
chatgpt_account_id = auth_claims.get("chatgpt_account_id", "")
token_data = TokenData(
id_token=payload["id_token"],
access_token=payload["access_token"],
refresh_token=payload["refresh_token"],
account_id=chatgpt_account_id,
)
access_token_parts = token_data.access_token.split(".")
if len(access_token_parts) != 3:
raise ValueError("Invalid access token")
access_token_claims = _decode_jwt_segment(access_token_parts[1])
token_claims = id_token_claims.get("https://api.openai.com/auth", {})
access_claims = access_token_claims.get("https://api.openai.com/auth", {})
org_id = token_claims.get("organization_id")
if not org_id:
raise ValueError("Missing organization in id_token claims")
project_id = token_claims.get("project_id")
if not project_id:
raise ValueError("Missing project in id_token claims")
random_id = secrets.token_hex(6)
@@ -251,7 +292,7 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
exchanged_access_token: str
with urllib.request.urlopen(
urllib.request.Request(
self.server.token_endpoint,
token_endpoint,
data=exchange_data,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
@@ -299,65 +340,6 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
except Exception as exc: # pragma: no cover best-effort only
eprint(f"Unable to redeem ChatGPT subscriber API credits: {exc}")
return (exchanged_access_token, success_url)
def _exchange_code(self, code: str) -> tuple[AuthBundle, str]:
"""Perform token + token-exchange to obtain an OpenAI API key.
Returns (AuthBundle, success_url).
"""
# 1. Authorization-code -> (id_token, access_token, refresh_token)
data = urllib.parse.urlencode(
{
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.server.redirect_uri,
"client_id": self.server.client_id,
"code_verifier": self.server.pkce.code_verifier,
}
).encode()
token_data: TokenData
with urllib.request.urlopen(
urllib.request.Request(
self.server.token_endpoint,
data=data,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
) as resp:
payload = json.loads(resp.read().decode())
# Extract chatgpt_account_id from id_token
id_token_parts = payload["id_token"].split(".")
if len(id_token_parts) != 3:
raise ValueError("Invalid ID token")
id_token_claims = _decode_jwt_segment(id_token_parts[1])
auth_claims = id_token_claims.get("https://api.openai.com/auth", {})
chatgpt_account_id = auth_claims.get("chatgpt_account_id", "")
token_data = TokenData(
id_token=payload["id_token"],
access_token=payload["access_token"],
refresh_token=payload["refresh_token"],
account_id=chatgpt_account_id,
)
access_token_parts = token_data.access_token.split(".")
if len(access_token_parts) != 3:
raise ValueError("Invalid access token")
access_token_claims = _decode_jwt_segment(access_token_parts[1])
token_claims = id_token_claims.get("https://api.openai.com/auth", {})
access_claims = access_token_claims.get("https://api.openai.com/auth", {})
exchanged_access_token, success_url = self._obtain_api_key(
token_claims, access_claims, token_data
)
# Persist refresh_token/id_token for future use (redeem credits etc.)
last_refresh_str = (
datetime.datetime.now(datetime.timezone.utc)
@@ -371,7 +353,7 @@ class _ApiKeyHTTPHandler(http.server.BaseHTTPRequestHandler):
last_refresh=last_refresh_str,
)
return (auth_bundle, success_url or f"{URL_BASE}/success")
return (auth_bundle, success_url)
def request_shutdown(self) -> None:
# shutdown() must be invoked from another thread to avoid
@@ -431,7 +413,6 @@ class _ApiKeyHTTPServer(http.server.HTTPServer):
request_handler_class: type[http.server.BaseHTTPRequestHandler],
*,
codex_home: str,
client_id: str,
verbose: bool = False,
) -> None:
super().__init__(server_address, request_handler_class, bind_and_activate=True)
@@ -441,8 +422,7 @@ class _ApiKeyHTTPServer(http.server.HTTPServer):
self.verbose: bool = verbose
self.issuer: str = DEFAULT_ISSUER
self.token_endpoint: str = f"{self.issuer}/oauth/token"
self.client_id: str = client_id
self.client_id: str = DEFAULT_CLIENT_ID
port = server_address[1]
self.redirect_uri: str = f"http://localhost:{port}/auth/callback"
self.pkce: PkceCodes = _generate_pkce()
@@ -601,8 +581,8 @@ def maybe_redeem_credits(
granted = redeem_data.get("granted_chatgpt_subscriber_api_credits", 0)
if granted and granted > 0:
eprint(
f"""Thanks for being a ChatGPT {"Plus" if plan_type == "plus" else "Pro"} subscriber!
If you haven't already redeemed, you should receive {"$5" if plan_type == "plus" else "$50"} in API credits.
f"""Thanks for being a ChatGPT {'Plus' if plan_type=='plus' else 'Pro'} subscriber!
If you haven't already redeemed, you should receive {'$5' if plan_type=='plus' else '$50'} in API credits.
Credits: https://platform.openai.com/settings/organization/billing/credit-grants
More info: https://help.openai.com/en/articles/11381614""",

View File

@@ -34,7 +34,6 @@ tokio = { version = "1", features = [
"signal",
] }
uuid = { version = "1", features = ["serde", "v4"] }
strum_macros = "0.27.2"
[dev-dependencies]
assert_cmd = "2"

View File

@@ -50,10 +50,6 @@ pub struct CodexToolCallParam {
/// The set of instructions to use instead of the default ones.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub base_instructions: Option<String>,
/// Whether to include the plan tool in the conversation.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub include_plan_tool: Option<bool>,
}
/// Custom enum mirroring [`AskForApproval`], but has an extra dependency on
@@ -144,10 +140,9 @@ impl CodexToolCallParam {
sandbox,
config: cli_overrides,
base_instructions,
include_plan_tool,
} = self;
// Build the `ConfigOverrides` recognized by codex-core.
// Build the `ConfigOverrides` recognised by codex-core.
let overrides = codex_core::config::ConfigOverrides {
model,
config_profile: profile,
@@ -157,7 +152,6 @@ impl CodexToolCallParam {
model_provider: None,
codex_linux_sandbox_exe,
base_instructions,
include_plan_tool,
};
let cli_overrides = cli_overrides
@@ -268,10 +262,6 @@ mod tests {
"description": "Working directory for the session. If relative, it is resolved against the server process's current working directory.",
"type": "string"
},
"include-plan-tool": {
"description": "Whether to include the plan tool in the conversation.",
"type": "boolean"
},
"model": {
"description": "Optional override for the model name (e.g. \"o3\", \"o4-mini\").",
"type": "string"

View File

@@ -263,7 +263,6 @@ async fn run_codex_tool_session_inner(
| EventMsg::PatchApplyBegin(_)
| EventMsg::PatchApplyEnd(_)
| EventMsg::GetHistoryEntryResponse(_)
| EventMsg::PlanUpdate(_)
| EventMsg::ShutdownComplete => {
// For now, we do not do anything extra for these
// events. Note that

View File

@@ -19,7 +19,6 @@ mod codex_tool_config;
mod codex_tool_runner;
mod exec_approval;
mod json_to_toml;
mod mcp_protocol;
mod message_processor;
mod outgoing_message;
mod patch_approval;

View File

@@ -1,962 +0,0 @@
use codex_core::config_types::SandboxMode;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use uuid::Uuid;
use mcp_types::RequestId;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ConversationId(pub Uuid);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct MessageId(pub Uuid);
// Requests
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallRequest {
#[serde(rename = "jsonrpc")]
pub jsonrpc: &'static str,
pub id: RequestId,
pub method: &'static str,
pub params: ToolCallRequestParams,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "name", content = "arguments", rename_all = "camelCase")]
pub enum ToolCallRequestParams {
ConversationCreate(ConversationCreateArgs),
ConversationStream(ConversationStreamArgs),
ConversationSendMessage(ConversationSendMessageArgs),
ConversationsList(ConversationsListArgs),
}
impl ToolCallRequestParams {
/// Wrap this request in a JSON-RPC request.
#[allow(dead_code)]
pub fn into_request(self, id: RequestId) -> ToolCallRequest {
ToolCallRequest {
jsonrpc: "2.0",
id,
method: "tools/call",
params: self,
}
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationCreateArgs {
pub prompt: String,
pub model: String,
pub cwd: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub approval_policy: Option<AskForApproval>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sandbox: Option<SandboxMode>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub base_instructions: Option<String>,
}
/// Optional overrides for an existing conversation's execution context when sending a message.
/// Fields left as `None` inherit the current conversation/session settings.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationOverrides {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cwd: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub approval_policy: Option<AskForApproval>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sandbox: Option<SandboxMode>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub config: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub profile: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub base_instructions: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationStreamArgs {
pub conversation_id: ConversationId,
}
/// If omitted, the message continues from the latest turn.
/// Set to resume/edit from an earlier parent message in the thread.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationSendMessageArgs {
pub conversation_id: ConversationId,
pub content: Vec<InputItem>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub parent_message_id: Option<MessageId>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[serde(flatten)]
pub conversation_overrides: Option<ConversationOverrides>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationsListArgs {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<u32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cursor: Option<String>,
}
// Responses
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ToolCallResponse {
pub request_id: RequestId,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub is_error: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub result: Option<ToolCallResponseResult>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ToolCallResponseResult {
ConversationCreate(ConversationCreateResult),
ConversationStream(ConversationStreamResult),
ConversationSendMessage(ConversationSendMessageResult),
ConversationsList(ConversationsListResult),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationCreateResult {
pub conversation_id: ConversationId,
pub model: String,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationStreamResult {}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationSendMessageResult {
pub success: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationsListResult {
pub conversations: Vec<ConversationSummary>,
#[serde(skip_serializing_if = "Option::is_none")]
pub next_cursor: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConversationSummary {
pub conversation_id: ConversationId,
pub title: String,
}
// Notifications
#[derive(Debug, Clone, Deserialize, Display)]
pub enum ServerNotification {
InitialState(InitialStateNotificationParams),
StreamDisconnected(StreamDisconnectedNotificationParams),
CodexEvent(Box<CodexEventNotificationParams>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NotificationMeta {
#[serde(skip_serializing_if = "Option::is_none")]
pub conversation_id: Option<ConversationId>,
#[serde(skip_serializing_if = "Option::is_none")]
pub request_id: Option<RequestId>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InitialStateNotificationParams {
#[serde(rename = "_meta", skip_serializing_if = "Option::is_none")]
pub meta: Option<NotificationMeta>,
pub initial_state: InitialStatePayload,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InitialStatePayload {
#[serde(default)]
pub events: Vec<CodexEventNotificationParams>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct StreamDisconnectedNotificationParams {
#[serde(rename = "_meta", skip_serializing_if = "Option::is_none")]
pub meta: Option<NotificationMeta>,
pub reason: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CodexEventNotificationParams {
#[serde(rename = "_meta", skip_serializing_if = "Option::is_none")]
pub meta: Option<NotificationMeta>,
pub msg: EventMsg,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CancelNotificationParams {
pub request_id: RequestId,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
}
impl Serialize for ServerNotification {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
use serde::ser::SerializeMap;
let mut map = serializer.serialize_map(Some(2))?;
match self {
ServerNotification::CodexEvent(p) => {
map.serialize_entry("method", &format!("notifications/{}", p.msg))?;
map.serialize_entry("params", p)?;
}
ServerNotification::InitialState(p) => {
map.serialize_entry("method", "notifications/initial_state")?;
map.serialize_entry("params", p)?;
}
ServerNotification::StreamDisconnected(p) => {
map.serialize_entry("method", "notifications/stream_disconnected")?;
map.serialize_entry("params", p)?;
}
}
map.end()
}
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
pub enum ClientNotification {
#[serde(rename = "notifications/cancelled")]
Cancelled(CancelNotificationParams),
}
#[cfg(test)]
#[allow(clippy::expect_used)]
#[allow(clippy::unwrap_used)]
mod tests {
use std::path::PathBuf;
use super::*;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::McpToolCallBeginEvent;
use pretty_assertions::assert_eq;
use serde::Serialize;
use serde_json::Value;
use serde_json::json;
use uuid::uuid;
fn to_val<T: Serialize>(v: &T) -> Value {
serde_json::to_value(v).expect("serialize to Value")
}
// ----- Requests -----
#[test]
fn serialize_tool_call_request_params_conversation_create_minimal() {
let req = ToolCallRequestParams::ConversationCreate(ConversationCreateArgs {
prompt: "".into(),
model: "o3".into(),
cwd: "/repo".into(),
approval_policy: None,
sandbox: None,
config: None,
profile: None,
base_instructions: None,
});
let observed = to_val(&req.into_request(mcp_types::RequestId::Integer(2)));
let expected = json!({
"jsonrpc": "2.0",
"id": 2,
"method": "tools/call",
"params": {
"name": "conversationCreate",
"arguments": {
"prompt": "",
"model": "o3",
"cwd": "/repo"
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_tool_call_request_params_conversation_send_message_with_overrides_and_parent_message_id()
{
let req = ToolCallRequestParams::ConversationSendMessage(ConversationSendMessageArgs {
conversation_id: ConversationId(uuid!("d0f6ecbe-84a2-41c1-b23d-b20473b25eab")),
content: vec![
InputItem::Text { text: "Hi".into() },
InputItem::Image {
image_url: "https://example.com/cat.jpg".into(),
},
InputItem::LocalImage {
path: "notes.txt".into(),
},
],
parent_message_id: Some(MessageId(uuid!("67e55044-10b1-426f-9247-bb680e5fe0c8"))),
conversation_overrides: Some(ConversationOverrides {
model: Some("o4-mini".into()),
cwd: Some("/workdir".into()),
approval_policy: None,
sandbox: Some(SandboxMode::DangerFullAccess),
config: Some(json!({"temp": 0.2})),
profile: Some("eng".into()),
base_instructions: Some("Be terse".into()),
}),
});
let observed = to_val(&req.into_request(mcp_types::RequestId::Integer(2)));
let expected = json!({
"jsonrpc": "2.0",
"id": 2,
"method": "tools/call",
"params": {
"name": "conversationSendMessage",
"arguments": {
"conversation_id": "d0f6ecbe-84a2-41c1-b23d-b20473b25eab",
"content": [
{ "type": "text", "text": "Hi" },
{ "type": "image", "image_url": "https://example.com/cat.jpg" },
{ "type": "local_image", "path": "notes.txt" }
],
"parent_message_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"model": "o4-mini",
"cwd": "/workdir",
"sandbox": "danger-full-access",
"config": { "temp": 0.2 },
"profile": "eng",
"base_instructions": "Be terse"
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_tool_call_request_params_conversations_list_with_opts() {
let req = ToolCallRequestParams::ConversationsList(ConversationsListArgs {
limit: Some(50),
cursor: Some("abc".into()),
});
let observed = to_val(&req.into_request(RequestId::Integer(2)));
let expected = json!({
"jsonrpc": "2.0",
"id": 2,
"method": "tools/call",
"params": {
"name": "conversationsList",
"arguments": {
"limit": 50,
"cursor": "abc"
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_tool_call_request_params_conversation_stream() {
let req = ToolCallRequestParams::ConversationStream(ConversationStreamArgs {
conversation_id: ConversationId(uuid!("67e55044-10b1-426f-9247-bb680e5fe0c8")),
});
let observed = to_val(&req.into_request(mcp_types::RequestId::Integer(2)));
let expected = json!({
"jsonrpc": "2.0",
"id": 2,
"method": "tools/call",
"params": {
"name": "conversationStream",
"arguments": {
"conversation_id": "67e55044-10b1-426f-9247-bb680e5fe0c8"
}
}
});
assert_eq!(observed, expected);
}
// ----- Message inputs / sources -----
#[test]
fn serialize_message_input_image_url() {
let item = InputItem::Image {
image_url: "https://example.com/x.png".into(),
};
let observed = to_val(&item);
let expected = json!({
"type": "image",
"image_url": "https://example.com/x.png"
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_message_input_local_image_path() {
let url = InputItem::LocalImage {
path: PathBuf::from("https://example.com/a.pdf"),
};
let id = InputItem::LocalImage {
path: PathBuf::from("file_456"),
};
let observed_url = to_val(&url);
let expected_url = json!({"type":"local_image","path":"https://example.com/a.pdf"});
assert_eq!(
observed_url, expected_url,
"LocalImage with URL path should serialize as image_url"
);
let observed_id = to_val(&id);
let expected_id = json!({"type":"local_image","path":"file_456"});
assert_eq!(
observed_id, expected_id,
"LocalImage with file id should serialize as image_url"
);
}
#[test]
fn serialize_message_input_image_url_without_detail() {
let item = InputItem::Image {
image_url: "https://example.com/x.png".into(),
};
let observed = to_val(&item);
let expected = json!({
"type": "image",
"image_url": "https://example.com/x.png"
});
assert_eq!(observed, expected);
}
// ----- Responses -----
#[test]
fn response_success_conversation_create_full_schema() {
let env = ToolCallResponse {
request_id: RequestId::Integer(1),
is_error: None,
result: Some(ToolCallResponseResult::ConversationCreate(
ConversationCreateResult {
conversation_id: ConversationId(uuid!("d0f6ecbe-84a2-41c1-b23d-b20473b25eab")),
model: "o3".into(),
},
)),
};
let observed = to_val(&env);
let expected = json!({
"requestId": 1,
"result": {
"conversation_id": "d0f6ecbe-84a2-41c1-b23d-b20473b25eab",
"model": "o3"
}
});
assert_eq!(
observed, expected,
"response (ConversationCreate) must match"
);
}
#[test]
fn response_success_conversation_stream_empty_result_object() {
let env = ToolCallResponse {
request_id: RequestId::Integer(2),
is_error: None,
result: Some(ToolCallResponseResult::ConversationStream(
ConversationStreamResult {},
)),
};
let observed = to_val(&env);
let expected = json!({
"requestId": 2,
"result": {}
});
assert_eq!(
observed, expected,
"response (ConversationStream) must have empty object result"
);
}
#[test]
fn response_success_send_message_accepted_full_schema() {
let env = ToolCallResponse {
request_id: RequestId::Integer(3),
is_error: None,
result: Some(ToolCallResponseResult::ConversationSendMessage(
ConversationSendMessageResult { success: true },
)),
};
let observed = to_val(&env);
let expected = json!({
"requestId": 3,
"result": { "success": true }
});
assert_eq!(
observed, expected,
"response (ConversationSendMessageAccepted) must match"
);
}
#[test]
fn response_success_conversations_list_with_next_cursor_full_schema() {
let env = ToolCallResponse {
request_id: RequestId::Integer(4),
is_error: None,
result: Some(ToolCallResponseResult::ConversationsList(
ConversationsListResult {
conversations: vec![ConversationSummary {
conversation_id: ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
)),
title: "Refactor config loader".into(),
}],
next_cursor: Some("next123".into()),
},
)),
};
let observed = to_val(&env);
let expected = json!({
"requestId": 4,
"result": {
"conversations": [
{
"conversation_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"title": "Refactor config loader"
}
],
"next_cursor": "next123"
}
});
assert_eq!(
observed, expected,
"response (ConversationsList with cursor) must match"
);
}
#[test]
fn response_error_only_is_error_and_request_id_string() {
let env = ToolCallResponse {
request_id: RequestId::Integer(4),
is_error: Some(true),
result: None,
};
let observed = to_val(&env);
let expected = json!({
"requestId": 4,
"isError": true
});
assert_eq!(
observed, expected,
"error response must omit `result` and include `isError`"
);
}
// ----- Notifications -----
#[test]
fn serialize_notification_initial_state_minimal() {
let params = InitialStateNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: Some(RequestId::Integer(44)),
}),
initial_state: InitialStatePayload {
events: vec![
CodexEventNotificationParams {
meta: None,
msg: EventMsg::TaskStarted,
},
CodexEventNotificationParams {
meta: None,
msg: EventMsg::AgentMessageDelta(
codex_core::protocol::AgentMessageDeltaEvent {
delta: "Loading...".into(),
},
),
},
],
},
};
let observed = to_val(&ServerNotification::InitialState(params.clone()));
let expected = json!({
"method": "notifications/initial_state",
"params": {
"_meta": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"requestId": 44
},
"initial_state": {
"events": [
{ "msg": { "type": "task_started" } },
{ "msg": { "type": "agent_message_delta", "delta": "Loading..." } }
]
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_initial_state_omits_empty_events_full_json() {
let params = InitialStateNotificationParams {
meta: None,
initial_state: InitialStatePayload { events: vec![] },
};
let observed = to_val(&ServerNotification::InitialState(params));
let expected = json!({
"method": "notifications/initial_state",
"params": {
"initial_state": { "events": [] }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_stream_disconnected() {
let params = StreamDisconnectedNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: None,
}),
reason: "New stream() took over".into(),
};
let observed = to_val(&ServerNotification::StreamDisconnected(params));
let expected = json!({
"method": "notifications/stream_disconnected",
"params": {
"_meta": { "conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8" },
"reason": "New stream() took over"
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_uses_eventmsg_type_in_method() {
let params = CodexEventNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: Some(RequestId::Integer(44)),
}),
msg: EventMsg::AgentMessage(codex_core::protocol::AgentMessageEvent {
message: "hi".into(),
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/agent_message",
"params": {
"_meta": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"requestId": 44
},
"msg": { "type": "agent_message", "message": "hi" }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_task_started_full_json() {
let params = CodexEventNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: Some(RequestId::Integer(7)),
}),
msg: EventMsg::TaskStarted,
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/task_started",
"params": {
"_meta": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"requestId": 7
},
"msg": { "type": "task_started" }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_agent_message_delta_full_json() {
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::AgentMessageDelta(codex_core::protocol::AgentMessageDeltaEvent {
delta: "stream...".into(),
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/agent_message_delta",
"params": {
"msg": { "type": "agent_message_delta", "delta": "stream..." }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_agent_message_full_json() {
let params = CodexEventNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: Some(RequestId::Integer(44)),
}),
msg: EventMsg::AgentMessage(codex_core::protocol::AgentMessageEvent {
message: "hi".into(),
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/agent_message",
"params": {
"_meta": {
"conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"requestId": 44
},
"msg": { "type": "agent_message", "message": "hi" }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_agent_reasoning_full_json() {
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::AgentReasoning(codex_core::protocol::AgentReasoningEvent {
text: "thinking…".into(),
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/agent_reasoning",
"params": {
"msg": { "type": "agent_reasoning", "text": "thinking…" }
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_token_count_full_json() {
let usage = codex_core::protocol::TokenUsage {
input_tokens: 10,
cached_input_tokens: Some(2),
output_tokens: 5,
reasoning_output_tokens: Some(1),
total_tokens: 16,
};
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::TokenCount(usage),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/token_count",
"params": {
"msg": {
"type": "token_count",
"input_tokens": 10,
"cached_input_tokens": 2,
"output_tokens": 5,
"reasoning_output_tokens": 1,
"total_tokens": 16
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_session_configured_full_json() {
let params = CodexEventNotificationParams {
meta: Some(NotificationMeta {
conversation_id: Some(ConversationId(uuid!(
"67e55044-10b1-426f-9247-bb680e5fe0c8"
))),
request_id: None,
}),
msg: EventMsg::SessionConfigured(codex_core::protocol::SessionConfiguredEvent {
session_id: uuid!("67e55044-10b1-426f-9247-bb680e5fe0c8"),
model: "codex-mini-latest".into(),
history_log_id: 42,
history_entry_count: 3,
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/session_configured",
"params": {
"_meta": { "conversationId": "67e55044-10b1-426f-9247-bb680e5fe0c8" },
"msg": {
"type": "session_configured",
"session_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
"model": "codex-mini-latest",
"history_log_id": 42,
"history_entry_count": 3
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_exec_command_begin_full_json() {
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::ExecCommandBegin(codex_core::protocol::ExecCommandBeginEvent {
call_id: "c1".into(),
command: vec!["bash".into(), "-lc".into(), "echo hi".into()],
cwd: std::path::PathBuf::from("/work"),
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/exec_command_begin",
"params": {
"msg": {
"type": "exec_command_begin",
"call_id": "c1",
"command": ["bash", "-lc", "echo hi"],
"cwd": "/work"
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_mcp_tool_call_begin_full_json() {
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: "m1".into(),
invocation: McpInvocation {
server: "calc".into(),
tool: "add".into(),
arguments: Some(json!({"a":1,"b":2})),
},
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/mcp_tool_call_begin",
"params": {
"msg": {
"type": "mcp_tool_call_begin",
"call_id": "m1",
"invocation": {
"server": "calc",
"tool": "add",
"arguments": { "a": 1, "b": 2 }
}
}
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_codex_event_patch_apply_end_full_json() {
let params = CodexEventNotificationParams {
meta: None,
msg: EventMsg::PatchApplyEnd(codex_core::protocol::PatchApplyEndEvent {
call_id: "p1".into(),
stdout: "ok".into(),
stderr: "".into(),
success: true,
}),
};
let observed = to_val(&ServerNotification::CodexEvent(Box::new(params)));
let expected = json!({
"method": "notifications/patch_apply_end",
"params": {
"msg": {
"type": "patch_apply_end",
"call_id": "p1",
"stdout": "ok",
"stderr": "",
"success": true
}
}
});
assert_eq!(observed, expected);
}
// ----- Cancelled notifications -----
#[test]
fn serialize_notification_cancelled_with_reason_full_json() {
let params = CancelNotificationParams {
request_id: RequestId::String("r-123".into()),
reason: Some("user_cancelled".into()),
};
let observed = to_val(&ClientNotification::Cancelled(params));
let expected = json!({
"method": "notifications/cancelled",
"params": {
"requestId": "r-123",
"reason": "user_cancelled"
}
});
assert_eq!(observed, expected);
}
#[test]
fn serialize_notification_cancelled_without_reason_full_json() {
let params = CancelNotificationParams {
request_id: RequestId::Integer(77),
reason: None,
};
let observed = to_val(&ClientNotification::Cancelled(params));
// Check exact structure: reason must be omitted.
assert_eq!(observed["method"], "notifications/cancelled");
assert_eq!(observed["params"]["requestId"], 77);
assert!(
observed["params"].get("reason").is_none(),
"reason must be omitted when None"
);
}
}
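// A minimal sketch (hypothetical `ToolCallResponseSketch` type; the real
// definition lives elsewhere in the crate) of the serde shape the tests above
// pin down: camelCase keys, and optional fields omitted entirely when `None`.
use serde::Serialize;
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ToolCallResponseSketch {
    request_id: i64,
    #[serde(skip_serializing_if = "Option::is_none")]
    is_error: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<serde_json::Value>,
}
fn tool_call_response_sketch() {
    let env = ToolCallResponseSketch {
        request_id: 4,
        is_error: Some(true),
        result: None,
    };
    let observed = serde_json::to_value(&env).expect("serialize");
    // Mirrors `response_error_only_is_error_and_request_id_string` above:
    // `result` is dropped, `isError` and `requestId` are camelCase.
    assert_eq!(observed, serde_json::json!({ "requestId": 4, "isError": true }));
}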

View File

@@ -81,7 +81,6 @@ async fn shell_command_interruption() -> anyhow::Result<()> {
sandbox: None,
config: None,
base_instructions: None,
include_plan_tool: None,
})
.await?;

View File

@@ -65,3 +65,4 @@ uuid = "1"
[dev-dependencies]
insta = "1.43.1"
pretty_assertions = "1"
tempfile = "3.13.0"

View File

@@ -5,15 +5,17 @@ use crate::file_search::FileSearchManager;
use crate::get_git_diff::get_git_diff;
use crate::git_warning_screen::GitWarningOutcome;
use crate::git_warning_screen::GitWarningScreen;
use crate::login_screen::LoginScreen;
use crate::scroll_event_helper::ScrollEventHelper;
use crate::slash_command::SlashCommand;
use crate::tui;
use codex_core::config::Config;
use codex_core::protocol::Event;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use color_eyre::eyre::Result;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::MouseEvent;
use crossterm::event::MouseEventKind;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
@@ -35,6 +37,8 @@ enum AppState<'a> {
/// `AppState`.
widget: Box<ChatWidget<'a>>,
},
/// The login screen for the OpenAI provider.
Login { screen: LoginScreen },
/// The start-up warning that recommends running codex inside a Git repo.
GitWarning { screen: GitWarningScreen },
}
@@ -55,21 +59,31 @@ pub(crate) struct App<'a> {
/// Stored parameters needed to instantiate the ChatWidget later, e.g.,
/// after dismissing the Git-repo warning.
chat_args: Option<ChatWidgetArgs>,
}
/// Tracks pending summarization requests for the compact feature.
pending_summarization: Option<PendingSummarization>,
/// Aggregate parameters needed to create a `ChatWidget`, as creation may be
/// deferred until after the Git warning screen is dismissed.
#[derive(Clone)]
struct ChatWidgetArgs {
config: Config,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
prompt_label: Option<String>,
}
impl App<'_> {
pub(crate) fn new(
config: Config,
initial_prompt: Option<String>,
show_login_screen: bool,
show_git_warning: bool,
initial_images: Vec<std::path::PathBuf>,
prompt_label: Option<String>,
) -> Self {
let (app_event_tx, app_event_rx) = channel();
let app_event_tx = AppEventSender::new(app_event_tx);
let pending_redraw = Arc::new(AtomicBool::new(false));
let scroll_event_helper = ScrollEventHelper::new(app_event_tx.clone());
// Spawn a dedicated thread for reading the crossterm event loop and
// re-publishing the events as AppEvents, as appropriate.
@@ -92,6 +106,18 @@ impl App<'_> {
crossterm::event::Event::Resize(_, _) => {
app_event_tx.send(AppEvent::RequestRedraw);
}
crossterm::event::Event::Mouse(MouseEvent {
kind: MouseEventKind::ScrollUp,
..
}) => {
scroll_event_helper.scroll_up();
}
crossterm::event::Event::Mouse(MouseEvent {
kind: MouseEventKind::ScrollDown,
..
}) => {
scroll_event_helper.scroll_down();
}
crossterm::event::Event::Paste(pasted) => {
// Many terminals convert newlines to \r when
// pasting, e.g. [iTerm2][]. But [tui-textarea
@@ -114,7 +140,19 @@ impl App<'_> {
});
}
let (app_state, chat_args) = if show_git_warning {
let (app_state, chat_args) = if show_login_screen {
(
AppState::Login {
screen: LoginScreen::new(app_event_tx.clone(), config.codex_home.clone()),
},
Some(ChatWidgetArgs {
config: config.clone(),
initial_prompt,
initial_images,
prompt_label: prompt_label.clone(),
}),
)
} else if show_git_warning {
(
AppState::GitWarning {
screen: GitWarningScreen::new(),
@@ -123,6 +161,7 @@ impl App<'_> {
config: config.clone(),
initial_prompt,
initial_images,
prompt_label: prompt_label.clone(),
}),
)
} else {
@@ -131,6 +170,7 @@ impl App<'_> {
app_event_tx.clone(),
initial_prompt,
initial_images,
prompt_label.clone(),
);
(
AppState::Chat {
@@ -149,7 +189,6 @@ impl App<'_> {
file_search,
pending_redraw,
chat_args,
pending_summarization: None,
}
}
@@ -209,7 +248,7 @@ impl App<'_> {
AppState::Chat { widget } => {
widget.on_ctrl_c();
}
AppState::GitWarning { .. } => {
AppState::Login { .. } | AppState::GitWarning { .. } => {
// No-op.
}
}
@@ -230,7 +269,7 @@ impl App<'_> {
self.dispatch_key_event(key_event);
}
}
AppState::GitWarning { .. } => {
AppState::Login { .. } | AppState::GitWarning { .. } => {
self.app_event_tx.send(AppEvent::ExitRequest);
}
}
@@ -240,6 +279,9 @@ impl App<'_> {
}
};
}
AppEvent::Scroll(scroll_delta) => {
self.dispatch_scroll_event(scroll_delta);
}
AppEvent::Paste(text) => {
self.dispatch_paste_event(text);
}
@@ -251,11 +293,11 @@ impl App<'_> {
}
AppEvent::CodexOp(op) => match &mut self.app_state {
AppState::Chat { widget } => widget.submit_op(op),
AppState::GitWarning { .. } => {}
AppState::Login { .. } | AppState::GitWarning { .. } => {}
},
AppEvent::LatestLog(line) => match &mut self.app_state {
AppState::Chat { widget } => widget.update_latest_log(line),
AppState::GitWarning { .. } => {}
AppState::Login { .. } | AppState::GitWarning { .. } => {}
},
AppEvent::DispatchCommand(command) => match command {
SlashCommand::New => {
@@ -264,22 +306,11 @@ impl App<'_> {
self.app_event_tx.clone(),
None,
Vec::new(),
None,
));
self.app_state = AppState::Chat { widget: new_widget };
self.app_event_tx.send(AppEvent::RequestRedraw);
}
SlashCommand::Compact => {
if let AppState::Chat { widget } = &mut self.app_state {
// Submit the summarization request to the current widget
widget.submit_op(Op::SummarizeContext);
// Set up tracking for the summary response
self.pending_summarization = Some(PendingSummarization {
summary_buffer: String::new(),
started_receiving: false,
});
}
}
SlashCommand::Quit => {
break;
}
@@ -323,7 +354,9 @@ impl App<'_> {
pub(crate) fn token_usage(&self) -> codex_core::protocol::TokenUsage {
match &self.app_state {
AppState::Chat { widget } => widget.token_usage().clone(),
AppState::GitWarning { .. } => codex_core::protocol::TokenUsage::default(),
AppState::Login { .. } | AppState::GitWarning { .. } => {
codex_core::protocol::TokenUsage::default()
}
}
}
@@ -334,6 +367,9 @@ impl App<'_> {
AppState::Chat { widget } => {
terminal.draw(|frame| frame.render_widget_ref(&**widget, frame.area()))?;
}
AppState::Login { screen } => {
terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?;
}
AppState::GitWarning { screen } => {
terminal.draw(|frame| frame.render_widget_ref(&*screen, frame.area()))?;
}
@@ -348,6 +384,7 @@ impl App<'_> {
AppState::Chat { widget } => {
widget.handle_key_event(key_event);
}
AppState::Login { screen } => screen.handle_key_event(key_event),
AppState::GitWarning { screen } => match screen.handle_key_event(key_event) {
GitWarningOutcome::Continue => {
// User accepted switch to chat view.
@@ -361,6 +398,7 @@ impl App<'_> {
self.app_event_tx.clone(),
args.initial_prompt,
args.initial_images,
args.prompt_label,
));
self.app_state = AppState::Chat { widget };
self.app_event_tx.send(AppEvent::RequestRedraw);
@@ -378,118 +416,21 @@ impl App<'_> {
fn dispatch_paste_event(&mut self, pasted: String) {
match &mut self.app_state {
AppState::Chat { widget } => widget.handle_paste(pasted),
AppState::GitWarning { .. } => {}
AppState::Login { .. } | AppState::GitWarning { .. } => {}
}
}
fn dispatch_scroll_event(&mut self, scroll_delta: i32) {
match &mut self.app_state {
AppState::Chat { widget } => widget.handle_scroll_delta(scroll_delta),
AppState::Login { .. } | AppState::GitWarning { .. } => {}
}
}
fn dispatch_codex_event(&mut self, event: Event) {
// First check if we're waiting for a summarization response
if self.pending_summarization.is_some() {
self.handle_summarization_response(event);
return;
}
// Otherwise dispatch to the current app state
match &mut self.app_state {
AppState::Chat { widget } => widget.handle_codex_event(event),
AppState::GitWarning { .. } => {}
}
}
/// Handles responses during a summarization request.
fn handle_summarization_response(&mut self, event: Event) {
match &event.msg {
EventMsg::AgentMessage(msg) => {
// Only collect messages once we've started receiving the summarization
if let Some(ref mut pending) = self.pending_summarization {
// Start collecting once we see a message that looks like a summary
if !pending.started_receiving && msg.message.contains("summarize") {
pending.started_receiving = true;
}
if pending.started_receiving {
pending.summary_buffer.push_str(&msg.message);
pending.summary_buffer.push('\n');
}
}
}
EventMsg::TaskComplete(_) => {
// Task is complete, now create a new widget with the summary
if let Some(pending) = self.pending_summarization.take() {
let summary = create_compact_summary_prompt(&pending.summary_buffer);
// Create new widget with summary as initial prompt
let new_widget = Box::new(ChatWidget::new(
self.config.clone(),
self.app_event_tx.clone(),
Some(summary),
Vec::new(),
));
self.app_state = AppState::Chat { widget: new_widget };
self.app_event_tx.send(AppEvent::Redraw);
}
}
_ => {}
AppState::Login { .. } | AppState::GitWarning { .. } => {}
}
}
}
/// State for tracking a pending summarization request.
struct PendingSummarization {
/// Buffer to collect the summary response.
summary_buffer: String,
/// Whether we've received the first message of the summarization response.
started_receiving: bool,
}
/// Aggregate parameters needed to create a `ChatWidget`, as creation may be
/// deferred until after the Git warning screen is dismissed.
#[derive(Clone)]
struct ChatWidgetArgs {
config: Config,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
}
/// Creates the initial prompt for a compacted conversation.
fn create_compact_summary_prompt(summary_text: &str) -> String {
if summary_text.trim().is_empty() {
"Previous conversation has been summarized.".to_string()
} else {
format!(
r#"This chat is a continuation of a previous conversation. After providing the summary, acknowledge that /compact command has been applied. Here is the summary of the previous conversation:
{}"#,
summary_text.trim()
)
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::unwrap_used)]
use super::*;
#[test]
fn test_summary_buffer_accumulation() {
let mut buffer = String::new();
// Simulate the way we accumulate messages in pending_summarization
buffer.push_str("First message part");
buffer.push('\n');
buffer.push_str("Second message part");
buffer.push('\n');
buffer.push_str("Final message part");
let prompt = create_compact_summary_prompt(&buffer);
// Should contain all parts
assert!(prompt.contains("First message part"));
assert!(prompt.contains("Second message part"));
assert!(prompt.contains("Final message part"));
// Should preserve newlines in the content
let trimmed_buffer = buffer.trim();
assert!(prompt.contains(trimmed_buffer));
}
}

View File

@@ -6,7 +6,7 @@ use ratatui::text::Line;
use crate::slash_command::SlashCommand;
#[allow(clippy::large_enum_variant)]
pub enum AppEvent {
pub(crate) enum AppEvent {
CodexEvent(Event),
/// Request a redraw which will be debounced by the [`App`].
@@ -20,6 +20,10 @@ pub enum AppEvent {
/// Text pasted from the terminal clipboard.
Paste(String),
/// Scroll event with a value representing the "scroll delta" as the net
/// scroll up/down events within a short time window.
Scroll(i32),
/// Request to exit the application gracefully.
ExitRequest,

View File

@@ -3,18 +3,18 @@ use std::sync::mpsc::Sender;
use crate::app_event::AppEvent;
#[derive(Clone, Debug)]
pub struct AppEventSender {
pub(crate) struct AppEventSender {
app_event_tx: Sender<AppEvent>,
}
impl AppEventSender {
pub fn new(app_event_tx: Sender<AppEvent>) -> Self {
pub(crate) fn new(app_event_tx: Sender<AppEvent>) -> Self {
Self { app_event_tx }
}
/// Send an event to the app event channel. If it fails, we swallow the
/// error and log it.
pub fn send(&self, event: AppEvent) {
pub(crate) fn send(&self, event: AppEvent) {
if let Err(e) = self.app_event_tx.send(event) {
tracing::error!("failed to send event: {e}");
}

View File

@@ -33,7 +33,7 @@ pub enum InputResult {
None,
}
pub struct ChatComposer<'a> {
pub(crate) struct ChatComposer<'a> {
textarea: TextArea<'a>,
active_popup: ActivePopup,
app_event_tx: AppEventSender,
@@ -477,17 +477,6 @@ impl ChatComposer<'_> {
}
}
if let Input {
key: Key::Char('u'),
ctrl: true,
alt: false,
..
} = input
{
self.textarea.delete_line_by_head();
return (InputResult::None, true);
}
// Normal input handling
self.textarea.input(input);
let text_after = self.textarea.lines().join("\n");

View File

@@ -14,7 +14,7 @@ use ratatui::widgets::WidgetRef;
mod approval_modal_view;
mod bottom_pane_view;
pub mod chat_composer;
mod chat_composer;
mod chat_composer_history;
mod command_popup;
mod file_search_popup;

View File

@@ -0,0 +1,20 @@
use ratatui::prelude::*;
/// Trait implemented by every type that can live inside the conversation
/// history list. It provides two primitives that the parent scroll-view
/// needs: how *tall* the widget is at a given width and how to render an
/// arbitrary contiguous *window* of that widget.
///
/// The `first_visible_line` argument to [`render_window`] allows partial
/// rendering when the top of the widget is scrolled off-screen. The caller
/// guarantees that `first_visible_line + area.height as usize` never exceeds
/// the total height previously returned by [`height`].
pub(crate) trait CellWidget {
/// Total height measured in wrapped terminal lines when drawn with the
/// given *content* width (no scrollbar column included).
fn height(&self, width: u16) -> usize;
/// Render a *window* that starts `first_visible_line` lines below the top
/// of the widget. The window's size is given by `area`.
fn render_window(&self, first_visible_line: usize, area: Rect, buf: &mut Buffer);
}
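// A minimal sketch (hypothetical `PlainTextCell` type) of an implementation of
// the trait above for pre-wrapped text: `height` ignores the width because the
// lines are already wrapped, and `render_window` paints only the slice of
// lines the scroll-view asked for.
struct PlainTextCell {
    lines: Vec<Line<'static>>,
}
impl CellWidget for PlainTextCell {
    fn height(&self, _width: u16) -> usize {
        self.lines.len()
    }
    fn render_window(&self, first_visible_line: usize, area: Rect, buf: &mut Buffer) {
        for (row, line) in self
            .lines
            .iter()
            .skip(first_visible_line)
            .take(area.height as usize)
            .enumerate()
        {
            buf.set_line(area.x, area.y + row as u16, line, area.width);
        }
    }
}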

View File

@@ -1,6 +1,5 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use codex_core::codex_wrapper::CodexConversation;
use codex_core::codex_wrapper::init_codex;
@@ -37,9 +36,8 @@ use crate::bottom_pane::BottomPane;
use crate::bottom_pane::BottomPaneParams;
use crate::bottom_pane::CancellationEvent;
use crate::bottom_pane::InputResult;
use crate::conversation_history_widget::ConversationHistoryWidget;
use crate::exec_command::strip_bash_lc_and_escape;
use crate::history_cell::CommandOutput;
use crate::history_cell::HistoryCell;
use crate::history_cell::PatchEventType;
use crate::user_approval_widget::ApprovalRequest;
use codex_file_search::FileMatch;
@@ -47,6 +45,7 @@ use codex_file_search::FileMatch;
pub(crate) struct ChatWidget<'a> {
app_event_tx: AppEventSender,
codex_op_tx: UnboundedSender<Op>,
conversation_history: ConversationHistoryWidget,
bottom_pane: BottomPane<'a>,
config: Config,
initial_user_message: Option<UserMessage>,
@@ -56,6 +55,7 @@ pub(crate) struct ChatWidget<'a> {
// We wait for the final AgentMessage event and then emit the full text
// at once into scrollback so the history contains a single message.
answer_buffer: String,
prompt_label: Option<String>,
}
struct UserMessage {
@@ -86,6 +86,7 @@ impl ChatWidget<'_> {
app_event_tx: AppEventSender,
initial_prompt: Option<String>,
initial_images: Vec<PathBuf>,
prompt_label: Option<String>,
) -> Self {
let (codex_op_tx, mut codex_op_rx) = unbounded_channel::<Op>();
@@ -128,6 +129,7 @@ impl ChatWidget<'_> {
Self {
app_event_tx: app_event_tx.clone(),
codex_op_tx,
conversation_history: ConversationHistoryWidget::new(),
bottom_pane: BottomPane::new(BottomPaneParams {
app_event_tx,
has_input_focus: true,
@@ -140,6 +142,7 @@ impl ChatWidget<'_> {
token_usage: TokenUsage::default(),
reasoning_buffer: String::new(),
answer_buffer: String::new(),
prompt_label,
}
}
@@ -158,9 +161,11 @@ impl ChatWidget<'_> {
self.bottom_pane.handle_paste(text);
}
pub(crate) fn add_to_history(&mut self, cell: HistoryCell) {
self.app_event_tx
.send(AppEvent::InsertHistory(cell.plain_lines()));
/// Emits the last entry's plain lines from conversation_history, if any.
fn emit_last_history_entry(&mut self) {
if let Some(lines) = self.conversation_history.last_entry_plain_lines() {
self.app_event_tx.send(AppEvent::InsertHistory(lines));
}
}
fn submit_user_message(&mut self, user_message: UserMessage) {
@@ -196,18 +201,31 @@ impl ChatWidget<'_> {
// Only show text portion in conversation history for now.
if !text.is_empty() {
self.add_to_history(HistoryCell::new_user_prompt(text.clone()));
self.conversation_history.add_user_message(text.clone());
self.emit_last_history_entry();
}
self.conversation_history.scroll_to_bottom();
}
pub(crate) fn handle_codex_event(&mut self, event: Event) {
let Event { id, msg } = event;
match msg {
EventMsg::SessionConfigured(event) => {
// Record session information at the top of the conversation.
self.conversation_history.add_session_info(
&self.config,
event.clone(),
self.prompt_label.as_deref(),
);
// Immediately surface the session banner / settings summary in
// scrollback so the user can review configuration (model,
// sandbox, approvals, etc.) before interacting.
self.emit_last_history_entry();
// Forward history metadata to the bottom pane so the chat
// composer can navigate through past messages.
self.bottom_pane
.set_history_metadata(event.history_log_id, event.history_entry_count);
// Record session information at the top of the conversation.
self.add_to_history(HistoryCell::new_session_info(&self.config, event, true));
if let Some(user_message) = self.initial_user_message.take() {
// If the user provided an initial message, add it to the
@@ -229,7 +247,9 @@ impl ChatWidget<'_> {
message
};
if !full.is_empty() {
self.add_to_history(HistoryCell::new_agent_message(&self.config, full));
self.conversation_history
.add_agent_message(&self.config, full);
self.emit_last_history_entry();
}
self.request_redraw();
}
@@ -256,7 +276,9 @@ impl ChatWidget<'_> {
text
};
if !full.is_empty() {
self.add_to_history(HistoryCell::new_agent_reasoning(&self.config, full));
self.conversation_history
.add_agent_reasoning(&self.config, full);
self.emit_last_history_entry();
}
self.request_redraw();
}
@@ -277,7 +299,8 @@ impl ChatWidget<'_> {
.set_token_usage(self.token_usage.clone(), self.config.model_context_window);
}
EventMsg::Error(ErrorEvent { message }) => {
self.add_to_history(HistoryCell::new_error_event(message.clone()));
self.conversation_history.add_error(message.clone());
self.emit_last_history_entry();
self.bottom_pane.set_task_running(false);
}
EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent {
@@ -296,7 +319,9 @@ impl ChatWidget<'_> {
.map(|r| format!("\n{r}"))
.unwrap_or_default()
);
self.add_to_history(HistoryCell::new_background_event(text));
self.conversation_history.add_background_event(text);
self.emit_last_history_entry();
self.conversation_history.scroll_to_bottom();
let request = ApprovalRequest::Exec {
id,
@@ -324,10 +349,11 @@ impl ChatWidget<'_> {
// prompt before they have seen *what* is being requested.
// ------------------------------------------------------------------
self.add_to_history(HistoryCell::new_patch_event(
PatchEventType::ApprovalRequest,
changes,
));
self.conversation_history
.add_patch_event(PatchEventType::ApprovalRequest, changes);
self.emit_last_history_entry();
self.conversation_history.scroll_to_bottom();
// Now surface the approval request in the BottomPane as before.
let request = ApprovalRequest::ApplyPatch {
@@ -339,11 +365,13 @@ impl ChatWidget<'_> {
self.request_redraw();
}
EventMsg::ExecCommandBegin(ExecCommandBeginEvent {
call_id: _,
call_id,
command,
cwd: _,
}) => {
self.add_to_history(HistoryCell::new_active_exec_command(command));
self.conversation_history
.add_active_exec_command(call_id, command);
self.emit_last_history_entry();
self.request_redraw();
}
EventMsg::PatchApplyBegin(PatchApplyBeginEvent {
@@ -353,10 +381,12 @@ impl ChatWidget<'_> {
}) => {
// Even when a patch is auto-approved we still display the
// summary so the user can follow along.
self.add_to_history(HistoryCell::new_patch_event(
PatchEventType::ApplyBegin { auto_approved },
changes,
));
self.conversation_history
.add_patch_event(PatchEventType::ApplyBegin { auto_approved }, changes);
self.emit_last_history_entry();
if !auto_approved {
self.conversation_history.scroll_to_bottom();
}
self.request_redraw();
}
EventMsg::ExecCommandEnd(ExecCommandEndEvent {
@@ -365,39 +395,27 @@ impl ChatWidget<'_> {
stdout,
stderr,
}) => {
self.add_to_history(HistoryCell::new_completed_exec_command(
call_id,
CommandOutput {
exit_code,
stdout,
stderr,
duration: Duration::from_secs(0),
},
));
}
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: _,
invocation,
}) => {
self.add_to_history(HistoryCell::new_active_mcp_tool_call(invocation));
self.conversation_history
.record_completed_exec_command(call_id, stdout, stderr, exit_code);
self.request_redraw();
}
EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: _,
duration,
invocation,
result,
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id,
server,
tool,
arguments,
}) => {
self.add_to_history(HistoryCell::new_completed_mcp_tool_call(
80,
invocation,
duration,
result
.as_ref()
.map(|r| r.is_error.unwrap_or(false))
.unwrap_or(false),
result,
));
self.conversation_history
.add_active_mcp_tool_call(call_id, server, tool, arguments);
self.emit_last_history_entry();
self.request_redraw();
}
EventMsg::McpToolCallEnd(mcp_tool_call_end_event) => {
let success = mcp_tool_call_end_event.is_success();
let McpToolCallEndEvent { call_id, result } = mcp_tool_call_end_event;
self.conversation_history
.record_completed_mcp_tool_call(call_id, success, result);
self.request_redraw();
}
EventMsg::GetHistoryEntryResponse(event) => {
let codex_core::protocol::GetHistoryEntryResponseEvent {
@@ -414,7 +432,9 @@ impl ChatWidget<'_> {
self.app_event_tx.send(AppEvent::ExitRequest);
}
event => {
self.add_to_history(HistoryCell::new_background_event(format!("{event:?}")));
self.conversation_history
.add_background_event(format!("{event:?}"));
self.emit_last_history_entry();
self.request_redraw();
}
}
@@ -431,7 +451,22 @@ impl ChatWidget<'_> {
}
pub(crate) fn add_diff_output(&mut self, diff_output: String) {
self.add_to_history(HistoryCell::new_diff_output(diff_output.clone()));
self.conversation_history
.add_diff_output(diff_output.clone());
self.emit_last_history_entry();
self.request_redraw();
}
pub(crate) fn handle_scroll_delta(&mut self, scroll_delta: i32) {
// If the user is trying to scroll exactly one line, we let them, but
// otherwise we assume they are trying to scroll in larger increments.
let magnified_scroll_delta = if scroll_delta == 1 {
1
} else {
// Play with this: perhaps it should be non-linear?
scroll_delta * 2
};
self.conversation_history.scroll(magnified_scroll_delta);
self.request_redraw();
}

View File

@@ -53,4 +53,38 @@ pub struct Cli {
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
/// Override the built-in system prompt (base instructions).
///
/// If the value looks like a path to an existing file, the contents of the
/// file are used. Otherwise, the value itself is used verbatim as the
/// instructions string.
#[arg(long = "experimental-instructions")]
pub experimental_instructions: Option<String>,
}
#[cfg(test)]
mod tests {
use super::Cli;
use clap::CommandFactory;
#[test]
fn help_includes_file_behavior_for_experimental_instructions() {
let mut cmd = Cli::command();
let mut buf: Vec<u8> = Vec::new();
assert!(cmd.write_long_help(&mut buf).is_ok(), "help should render");
let help = match String::from_utf8(buf) {
Ok(s) => s,
Err(e) => panic!("invalid utf8: {e}"),
};
assert!(help.contains("Override the built-in system prompt (base instructions)."));
assert!(help.contains(
"If the value looks like a path to an existing file, the contents of the file are used."
));
assert!(
help.contains(
"Otherwise, the value itself is used verbatim as the instructions string."
)
);
}
}
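// A minimal sketch of the flag's file-or-verbatim resolution described above
// (the `resolve_experimental_instructions` name is hypothetical; the real
// wiring may go through a helper such as `maybe_read_file`):
fn resolve_experimental_instructions(value: &str) -> String {
    let path = std::path::Path::new(value);
    if path.is_file() {
        // The value looks like a path to an existing file: use its contents.
        std::fs::read_to_string(path).unwrap_or_else(|_| value.to_string())
    } else {
        // Otherwise the value itself is the instructions string.
        value.to_string()
    }
}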

View File

@@ -0,0 +1,435 @@
use crate::cell_widget::CellWidget;
use crate::history_cell::CommandOutput;
use crate::history_cell::HistoryCell;
use crate::history_cell::PatchEventType;
use codex_core::config::Config;
use codex_core::protocol::FileChange;
use codex_core::protocol::SessionConfiguredEvent;
use ratatui::prelude::*;
use ratatui::style::Style;
use ratatui::widgets::*;
use serde_json::Value as JsonValue;
use std::cell::Cell as StdCell;
use std::cell::Cell;
use std::collections::HashMap;
use std::path::PathBuf;
/// A single history entry plus its cached wrapped-line count.
struct Entry {
cell: HistoryCell,
line_count: Cell<usize>,
}
pub struct ConversationHistoryWidget {
entries: Vec<Entry>,
/// The width (in terminal cells/columns) that [`Entry::line_count`] was
/// computed for. When the available width changes we recompute counts.
cached_width: StdCell<u16>,
scroll_position: usize,
/// Number of lines the last time render_ref() was called
num_rendered_lines: StdCell<usize>,
/// The height of the viewport last time render_ref() was called
last_viewport_height: StdCell<usize>,
has_input_focus: bool,
}
impl ConversationHistoryWidget {
pub fn new() -> Self {
Self {
entries: Vec::new(),
cached_width: StdCell::new(0),
scroll_position: usize::MAX,
num_rendered_lines: StdCell::new(0),
last_viewport_height: StdCell::new(0),
has_input_focus: false,
}
}
/// Negative delta scrolls up; positive delta scrolls down.
pub(crate) fn scroll(&mut self, delta: i32) {
match delta.cmp(&0) {
std::cmp::Ordering::Less => self.scroll_up(-delta as u32),
std::cmp::Ordering::Greater => self.scroll_down(delta as u32),
std::cmp::Ordering::Equal => {}
}
}
fn scroll_up(&mut self, num_lines: u32) {
// If a user is scrolling up from the "stick to bottom" mode, we need to
// map this to a specific scroll position so we can calculate the delta.
// This requires us to care about how tall the screen is.
if self.scroll_position == usize::MAX {
self.scroll_position = self
.num_rendered_lines
.get()
.saturating_sub(self.last_viewport_height.get());
}
self.scroll_position = self.scroll_position.saturating_sub(num_lines as usize);
}
fn scroll_down(&mut self, num_lines: u32) {
// If we're already pinned to the bottom there's nothing to do.
if self.scroll_position == usize::MAX {
return;
}
let viewport_height = self.last_viewport_height.get().max(1);
let num_rendered_lines = self.num_rendered_lines.get();
// Compute the maximum explicit scroll offset that still shows a full
// viewport. This mirrors the calculation in `scroll_page_down()` and
// in the render path.
let max_scroll = num_rendered_lines.saturating_sub(viewport_height);
let new_pos = self.scroll_position.saturating_add(num_lines as usize);
if new_pos >= max_scroll {
// Reached (or passed) the bottom, so switch to stick-to-bottom mode
// so that additional output keeps the view pinned automatically.
self.scroll_position = usize::MAX;
} else {
self.scroll_position = new_pos;
}
}
pub fn scroll_to_bottom(&mut self) {
self.scroll_position = usize::MAX;
}
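// Worked example (assumed numbers) of the `usize::MAX` stick-to-bottom
// sentinel used by `scroll_up`/`scroll_down` above: 100 rendered lines and
// a 20-line viewport, starting pinned to the bottom.
fn sticky_bottom_sentinel_example() {
    let num_rendered_lines: usize = 100;
    let viewport_height: usize = 20;
    let mut scroll_position = usize::MAX;
    // Scrolling up first materializes the implicit bottom offset
    // (100 - 20 = 80) and then moves up from there.
    if scroll_position == usize::MAX {
        scroll_position = num_rendered_lines.saturating_sub(viewport_height); // 80
    }
    scroll_position = scroll_position.saturating_sub(3); // 77
    // Scrolling down past the maximum offset re-enters sticky mode, so new
    // output keeps the view pinned automatically.
    let max_scroll = num_rendered_lines.saturating_sub(viewport_height); // 80
    if scroll_position.saturating_add(5) >= max_scroll {
        scroll_position = usize::MAX;
    }
    assert_eq!(scroll_position, usize::MAX);
}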
/// Note `model` could differ from `config.model` if the agent decided to
/// use a different model than the one requested by the user.
pub fn add_session_info(
&mut self,
config: &Config,
event: SessionConfiguredEvent,
prompt_label: Option<&str>,
) {
// In practice, SessionConfiguredEvent should always be the first entry
// in the history, but it is possible that an error could be sent
// before the session info.
let has_welcome_message = self
.entries
.iter()
.any(|entry| matches!(entry.cell, HistoryCell::WelcomeMessage { .. }));
self.add_to_history(HistoryCell::new_session_info(
config,
event,
!has_welcome_message,
prompt_label,
));
}
pub fn add_user_message(&mut self, message: String) {
self.add_to_history(HistoryCell::new_user_prompt(message));
}
pub fn add_agent_message(&mut self, config: &Config, message: String) {
self.add_to_history(HistoryCell::new_agent_message(config, message));
}
pub fn add_agent_reasoning(&mut self, config: &Config, text: String) {
self.add_to_history(HistoryCell::new_agent_reasoning(config, text));
}
pub fn add_background_event(&mut self, message: String) {
self.add_to_history(HistoryCell::new_background_event(message));
}
pub fn add_diff_output(&mut self, diff_output: String) {
self.add_to_history(HistoryCell::new_diff_output(diff_output));
}
pub fn add_error(&mut self, message: String) {
self.add_to_history(HistoryCell::new_error_event(message));
}
/// Add a pending patch entry (before user approval).
pub fn add_patch_event(
&mut self,
event_type: PatchEventType,
changes: HashMap<PathBuf, FileChange>,
) {
self.add_to_history(HistoryCell::new_patch_event(event_type, changes));
}
pub fn add_active_exec_command(&mut self, call_id: String, command: Vec<String>) {
self.add_to_history(HistoryCell::new_active_exec_command(call_id, command));
}
pub fn add_active_mcp_tool_call(
&mut self,
call_id: String,
server: String,
tool: String,
arguments: Option<JsonValue>,
) {
self.add_to_history(HistoryCell::new_active_mcp_tool_call(
call_id, server, tool, arguments,
));
}
fn add_to_history(&mut self, cell: HistoryCell) {
let width = self.cached_width.get();
let count = if width > 0 { cell.height(width) } else { 0 };
self.entries.push(Entry {
cell,
line_count: Cell::new(count),
});
}
/// Return the lines for the most recently appended entry (if any) so the
/// parent widget can surface them via the new scrollback insertion path.
pub(crate) fn last_entry_plain_lines(&self) -> Option<Vec<Line<'static>>> {
self.entries.last().map(|e| e.cell.plain_lines())
}
pub fn record_completed_exec_command(
&mut self,
call_id: String,
stdout: String,
stderr: String,
exit_code: i32,
) {
let width = self.cached_width.get();
for entry in self.entries.iter_mut() {
let cell = &mut entry.cell;
if let HistoryCell::ActiveExecCommand {
call_id: history_id,
command,
start,
..
} = cell
{
if &call_id == history_id {
*cell = HistoryCell::new_completed_exec_command(
command.clone(),
CommandOutput {
exit_code,
stdout,
stderr,
duration: start.elapsed(),
},
);
// Update cached line count.
if width > 0 {
entry.line_count.set(cell.height(width));
}
break;
}
}
}
}
pub fn record_completed_mcp_tool_call(
&mut self,
call_id: String,
success: bool,
result: Result<mcp_types::CallToolResult, String>,
) {
let width = self.cached_width.get();
for entry in self.entries.iter_mut() {
if let HistoryCell::ActiveMcpToolCall {
call_id: history_id,
invocation,
start,
..
} = &entry.cell
{
if &call_id == history_id {
let completed = HistoryCell::new_completed_mcp_tool_call(
width,
invocation.clone(),
*start,
success,
result,
);
entry.cell = completed;
if width > 0 {
entry.line_count.set(entry.cell.height(width));
}
break;
}
}
}
}
}
impl WidgetRef for ConversationHistoryWidget {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
let (title, border_style) = if self.has_input_focus {
(
"Messages (↑/↓ or j/k = line, b/space = page)",
Style::default().fg(Color::LightYellow),
)
} else {
("Messages (tab to focus)", Style::default().dim())
};
let block = Block::default()
.title(title)
.borders(Borders::ALL)
.border_type(BorderType::Rounded)
.border_style(border_style);
// Compute the inner area that will be available for the list after
// the surrounding `Block` is drawn.
let inner = block.inner(area);
let viewport_height = inner.height as usize;
// Cache (and if necessary recalculate) the wrapped line counts for every
// [`HistoryCell`] so that our scrolling math accounts for text
// wrapping. We always reserve one column on the right-hand side for the
// scrollbar so that the content never renders "under" the scrollbar.
let effective_width = inner.width.saturating_sub(1);
if effective_width == 0 {
return; // Nothing to draw; avoid division by zero.
}
// Recompute cache if the effective width changed.
let num_lines: usize = if self.cached_width.get() != effective_width {
self.cached_width.set(effective_width);
let mut num_lines: usize = 0;
for entry in &self.entries {
let count = entry.cell.height(effective_width);
num_lines += count;
entry.line_count.set(count);
}
num_lines
} else {
self.entries.iter().map(|e| e.line_count.get()).sum()
};
// Determine the scroll position. Note the existing value of
// `self.scroll_position` could exceed the maximum scroll offset if the
// user made the window wider since the last render.
let max_scroll = num_lines.saturating_sub(viewport_height);
let scroll_pos = if self.scroll_position == usize::MAX {
max_scroll
} else {
self.scroll_position.min(max_scroll)
};
// ------------------------------------------------------------------
// Render order:
// 1. Clear full widget area (avoid artifacts from prior frame).
// 2. Draw the surrounding Block (border and title).
// 3. Render *each* visible HistoryCell into its own sub-Rect while
// respecting partial visibility at the top and bottom.
// 4. Draw the scrollbar track / thumb in the reserved column.
// ------------------------------------------------------------------
// Clear entire widget area first.
Clear.render(area, buf);
// Draw border + title.
block.render(area, buf);
// ------------------------------------------------------------------
// Calculate which cells are visible for the current scroll position
// and paint them one by one.
// ------------------------------------------------------------------
let mut y_cursor = inner.y; // first line inside viewport
let mut remaining_height = inner.height as usize;
let mut lines_to_skip = scroll_pos; // number of wrapped lines to skip (above viewport)
for entry in &self.entries {
let cell_height = entry.line_count.get();
// Completely above viewport? Skip whole cell.
if lines_to_skip >= cell_height {
lines_to_skip -= cell_height;
continue;
}
// Determine how much of this cell is visible.
let visible_height = (cell_height - lines_to_skip).min(remaining_height);
if visible_height == 0 {
break; // no space left
}
let cell_rect = Rect {
x: inner.x,
y: y_cursor,
width: effective_width,
height: visible_height as u16,
};
entry.cell.render_window(lines_to_skip, cell_rect, buf);
// Advance cursor inside viewport.
y_cursor += visible_height as u16;
remaining_height -= visible_height;
// After the first (possibly partially skipped) cell, we no longer
// need to skip lines at the top.
lines_to_skip = 0;
if remaining_height == 0 {
break; // viewport filled
}
}
// Always render a scrollbar *track* so the reserved column is filled.
let overflow = num_lines.saturating_sub(viewport_height);
let mut scroll_state = ScrollbarState::default()
// The Scrollbar widget expects the *content* height minus the
// viewport height. When there is no overflow we still provide 0
// so that the widget renders only the track without a thumb.
.content_length(overflow)
.position(scroll_pos);
{
// Choose a thumb color that stands out only when this pane has focus so that the
// user's attention is naturally drawn to the active viewport. When unfocused we show
// a low-contrast thumb so the scrollbar fades into the background without becoming
// invisible.
let thumb_style = if self.has_input_focus {
Style::reset().fg(Color::LightYellow)
} else {
Style::reset().fg(Color::Gray)
};
// By default the Scrollbar widget inherits any style that was
// present in the underlying buffer cells. That means if a colored
// line happens to be underneath the scrollbar, the track (and
// potentially the thumb) adopt that color. Explicitly setting the
// track/thumb styles ensures we always draw the scrollbar with a
// consistent palette regardless of what content is behind it.
StatefulWidget::render(
Scrollbar::new(ScrollbarOrientation::VerticalRight)
.begin_symbol(Some("↑"))
.end_symbol(Some("↓"))
.begin_style(Style::reset().fg(Color::DarkGray))
.end_style(Style::reset().fg(Color::DarkGray))
.thumb_symbol("█")
.thumb_style(thumb_style)
.track_symbol(Some("│"))
.track_style(Style::reset().fg(Color::DarkGray)),
inner,
buf,
&mut scroll_state,
);
}
// Update auxiliary stats that the scroll handlers rely on.
self.num_rendered_lines.set(num_lines);
self.last_viewport_height.set(viewport_height);
}
}
/// Common [`Wrap`] configuration used for both measurement and rendering so
/// they stay in sync.
#[inline]
pub(crate) const fn wrap_cfg() -> ratatui::widgets::Wrap {
ratatui::widgets::Wrap { trim: false }
}

View File

@@ -1,3 +1,4 @@
use crate::cell_widget::CellWidget;
use crate::exec_command::escape_command;
use crate::markdown::append_markdown;
use crate::text_block::TextBlock;
@@ -10,10 +11,11 @@ use codex_core::WireApi;
use codex_core::config::Config;
use codex_core::model_supports_reasoning_summaries;
use codex_core::protocol::FileChange;
use codex_core::protocol::McpInvocation;
use codex_core::protocol::SessionConfiguredEvent;
use image::DynamicImage;
use image::GenericImageView;
use image::ImageReader;
use lazy_static::lazy_static;
use mcp_types::EmbeddedResourceResource;
use mcp_types::ResourceLink;
use ratatui::prelude::*;
@@ -22,10 +24,14 @@ use ratatui::style::Modifier;
use ratatui::style::Style;
use ratatui::text::Line as RtLine;
use ratatui::text::Span as RtSpan;
use ratatui_image::Image as TuiImage;
use ratatui_image::Resize as ImgResize;
use ratatui_image::picker::ProtocolType;
use std::collections::HashMap;
use std::io::Cursor;
use std::path::PathBuf;
use std::time::Duration;
use std::time::Instant;
use tracing::error;
pub(crate) struct CommandOutput {
@@ -40,21 +46,6 @@ pub(crate) enum PatchEventType {
ApplyBegin { auto_approved: bool },
}
fn span_to_static(span: &Span) -> Span<'static> {
Span {
style: span.style,
content: std::borrow::Cow::Owned(span.content.clone().into_owned()),
}
}
fn line_to_static(line: &Line) -> Line<'static> {
Line {
style: line.style,
alignment: line.alignment,
spans: line.spans.iter().map(span_to_static).collect(),
}
}
/// Represents an event to display in the conversation history. Returns its
/// `Vec<Line<'static>>` representation to make it easier to display in a
/// scrollable list.
@@ -72,13 +63,25 @@ pub(crate) enum HistoryCell {
AgentReasoning { view: TextBlock },
/// An exec tool call that has not finished yet.
ActiveExecCommand { view: TextBlock },
ActiveExecCommand {
call_id: String,
/// The shell command, escaped and formatted.
command: String,
start: Instant,
view: TextBlock,
},
/// Completed exec tool call.
CompletedExecCommand { view: TextBlock },
/// An MCP tool call that has not finished yet.
ActiveMcpToolCall { view: TextBlock },
ActiveMcpToolCall {
call_id: String,
/// Formatted line that shows the command name and arguments
invocation: Line<'static>,
start: Instant,
view: TextBlock,
},
/// Completed MCP tool call where we show the result serialized as JSON.
CompletedMcpToolCall { view: TextBlock },
@@ -91,7 +94,13 @@ pub(crate) enum HistoryCell {
// resized version avoids doing the potentially expensive rescale twice
// because the scroll-view first calls `height()` for layout and then
// `render_window()` for painting.
CompletedMcpToolCallWithImageOutput { _image: DynamicImage },
CompletedMcpToolCallWithImageOutput {
image: DynamicImage,
/// Cached data derived from the current terminal width. The cache is
/// invalidated whenever the width changes (e.g. when the user
/// resizes the window).
render_cache: std::cell::RefCell<Option<ImageRenderCache>>,
},
/// Background event.
BackgroundEvent { view: TextBlock },
@@ -131,9 +140,7 @@ impl HistoryCell {
| HistoryCell::CompletedMcpToolCall { view }
| HistoryCell::PendingPatch { view }
| HistoryCell::ActiveExecCommand { view, .. }
| HistoryCell::ActiveMcpToolCall { view, .. } => {
view.lines.iter().map(line_to_static).collect()
}
| HistoryCell::ActiveMcpToolCall { view, .. } => view.lines.clone(),
HistoryCell::CompletedMcpToolCallWithImageOutput { .. } => vec![
Line::from("tool result (image output omitted)"),
Line::from(""),
@@ -144,6 +151,7 @@ impl HistoryCell {
config: &Config,
event: SessionConfiguredEvent,
is_first_event: bool,
prompt_label: Option<&str>,
) -> Self {
let SessionConfiguredEvent {
model,
@@ -176,6 +184,9 @@ impl HistoryCell {
("approval", config.approval_policy.to_string()),
("sandbox", summarize_sandbox_policy(&config.sandbox_policy)),
];
if let Some(label) = prompt_label {
entries.push(("prompt", label.to_string()));
}
if config.model_provider.wire_api == WireApi::Responses
&& model_supports_reasoning_summaries(config)
{
@@ -245,8 +256,9 @@ impl HistoryCell {
}
}
pub(crate) fn new_active_exec_command(command: Vec<String>) -> Self {
pub(crate) fn new_active_exec_command(call_id: String, command: Vec<String>) -> Self {
let command_escaped = escape_command(&command);
let start = Instant::now();
let lines: Vec<Line<'static>> = vec![
Line::from(vec!["command".magenta(), " running...".dim()]),
@@ -255,6 +267,9 @@ impl HistoryCell {
];
HistoryCell::ActiveExecCommand {
call_id,
command: command_escaped,
start,
view: TextBlock::new(lines),
}
}
@@ -299,15 +314,41 @@ impl HistoryCell {
}
}
pub(crate) fn new_active_mcp_tool_call(invocation: McpInvocation) -> Self {
let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
let lines: Vec<Line> = vec![
title_line,
format_mcp_invocation(invocation.clone()),
Line::from(""),
pub(crate) fn new_active_mcp_tool_call(
call_id: String,
server: String,
tool: String,
arguments: Option<serde_json::Value>,
) -> Self {
// Format the arguments as compact JSON so they roughly fit on one
// line. If there are no arguments we keep it empty so the invocation
// mirrors a function-style call.
let args_str = arguments
.as_ref()
.map(|v| {
// Use compact form to keep things short but readable.
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
})
.unwrap_or_default();
let invocation_spans = vec![
Span::styled(server, Style::default().fg(Color::Blue)),
Span::raw("."),
Span::styled(tool, Style::default().fg(Color::Blue)),
Span::raw("("),
Span::styled(args_str, Style::default().fg(Color::Gray)),
Span::raw(")"),
];
let invocation = Line::from(invocation_spans);
let start = Instant::now();
let title_line = Line::from(vec!["tool".magenta(), " running...".dim()]);
let lines: Vec<Line<'static>> = vec![title_line, invocation.clone(), Line::from("")];
HistoryCell::ActiveMcpToolCall {
call_id,
invocation,
start,
view: TextBlock::new(lines),
}
}
@@ -345,7 +386,10 @@ impl HistoryCell {
}
};
Some(HistoryCell::CompletedMcpToolCallWithImageOutput { _image: image })
Some(HistoryCell::CompletedMcpToolCallWithImageOutput {
image,
render_cache: std::cell::RefCell::new(None),
})
} else {
None
}
@@ -356,8 +400,8 @@ impl HistoryCell {
pub(crate) fn new_completed_mcp_tool_call(
num_cols: u16,
invocation: McpInvocation,
duration: Duration,
invocation: Line<'static>,
start: Instant,
success: bool,
result: Result<mcp_types::CallToolResult, String>,
) -> Self {
@@ -365,7 +409,7 @@ impl HistoryCell {
return cell;
}
let duration = format_duration(duration);
let duration = format_duration(start.elapsed());
let status_str = if success { "success" } else { "failed" };
let title_line = Line::from(vec![
"tool".magenta(),
@@ -380,7 +424,7 @@ impl HistoryCell {
let mut lines: Vec<Line<'static>> = Vec::new();
lines.push(title_line);
lines.push(format_mcp_invocation(invocation));
lines.push(invocation);
match result {
Ok(mcp_types::CallToolResult { content, .. }) => {
@@ -541,6 +585,167 @@ impl HistoryCell {
}
}
#[cfg(test)]
mod tests {
use super::*;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config::ConfigToml;
use uuid::Uuid;
use tempfile::TempDir;
fn minimal_config() -> Config {
let cwd = match TempDir::new() {
Ok(t) => t,
Err(e) => panic!("tempdir error: {e}"),
};
let codex_home = match TempDir::new() {
Ok(t) => t,
Err(e) => panic!("tempdir error: {e}"),
};
let cfg = ConfigToml {
..Default::default()
};
let overrides = ConfigOverrides {
cwd: Some(cwd.path().to_path_buf()),
..Default::default()
};
match Config::load_from_base_config_with_overrides(
cfg,
overrides,
codex_home.path().to_path_buf(),
) {
Ok(c) => c,
Err(e) => panic!("config error: {e}"),
}
}
fn lines_to_strings(lines: &[Line<'static>]) -> Vec<String> {
lines
.iter()
.map(|line| line.spans.iter().map(|s| s.content.to_string()).collect())
.collect()
}
#[test]
fn welcome_includes_prompt_label_experimental() {
let cfg = minimal_config();
let event = SessionConfiguredEvent {
session_id: Uuid::nil(),
model: cfg.model.clone(),
history_log_id: 0,
history_entry_count: 0,
};
let cell = HistoryCell::new_session_info(&cfg, event, true, Some("experimental"));
let lines = cell.plain_lines();
let strings = lines_to_strings(&lines);
assert!(
strings.iter().any(|s| s.contains("prompt: experimental")),
"welcome should include prompt label; got: {strings:?}"
);
}
#[test]
fn welcome_includes_prompt_label_filename() {
let cfg = minimal_config();
let event = SessionConfiguredEvent {
session_id: Uuid::nil(),
model: cfg.model.clone(),
history_log_id: 0,
history_entry_count: 0,
};
let cell = HistoryCell::new_session_info(&cfg, event, true, Some("instructions.md"));
let lines = cell.plain_lines();
let strings = lines_to_strings(&lines);
assert!(
strings
.iter()
.any(|s| s.contains("prompt: instructions.md")),
"welcome should include filename prompt label; got: {strings:?}"
);
}
}
// ---------------------------------------------------------------------------
// `CellWidget` implementation: most variants delegate to their internal
// `TextBlock`. Variants that need custom painting can add their own logic in
// the match arms.
// ---------------------------------------------------------------------------
impl CellWidget for HistoryCell {
fn height(&self, width: u16) -> usize {
match self {
HistoryCell::WelcomeMessage { view }
| HistoryCell::UserPrompt { view }
| HistoryCell::AgentMessage { view }
| HistoryCell::AgentReasoning { view }
| HistoryCell::BackgroundEvent { view }
| HistoryCell::GitDiffOutput { view }
| HistoryCell::ErrorEvent { view }
| HistoryCell::SessionInfo { view }
| HistoryCell::CompletedExecCommand { view }
| HistoryCell::CompletedMcpToolCall { view }
| HistoryCell::PendingPatch { view }
| HistoryCell::ActiveExecCommand { view, .. }
| HistoryCell::ActiveMcpToolCall { view, .. } => view.height(width),
HistoryCell::CompletedMcpToolCallWithImageOutput {
image,
render_cache,
} => ensure_image_cache(image, width, render_cache),
}
}
fn render_window(&self, first_visible_line: usize, area: Rect, buf: &mut Buffer) {
match self {
HistoryCell::WelcomeMessage { view }
| HistoryCell::UserPrompt { view }
| HistoryCell::AgentMessage { view }
| HistoryCell::AgentReasoning { view }
| HistoryCell::BackgroundEvent { view }
| HistoryCell::GitDiffOutput { view }
| HistoryCell::ErrorEvent { view }
| HistoryCell::SessionInfo { view }
| HistoryCell::CompletedExecCommand { view }
| HistoryCell::CompletedMcpToolCall { view }
| HistoryCell::PendingPatch { view }
| HistoryCell::ActiveExecCommand { view, .. }
| HistoryCell::ActiveMcpToolCall { view, .. } => {
view.render_window(first_visible_line, area, buf)
}
HistoryCell::CompletedMcpToolCallWithImageOutput {
image,
render_cache,
} => {
// Ensure we have a cached, resized copy that matches the current width.
// `height()` should have prepared the cache, but if something invalidated it
// (e.g. the first `render_window()` call happens *before* `height()` after a
// resize) we rebuild it here.
let width_cells = area.width;
// Ensure the cache is up-to-date and extract the scaled image.
let _ = ensure_image_cache(image, width_cells, render_cache);
let Some(resized) = render_cache
.borrow()
.as_ref()
.map(|c| c.scaled_image.clone())
else {
return;
};
let picker = &*TERMINAL_PICKER;
if let Ok(protocol) = picker.new_protocol(resized, area, ImgResize::Fit(None)) {
let img_widget = TuiImage::new(&protocol);
img_widget.render(area, buf);
}
}
}
}
}
fn create_diff_summary(changes: HashMap<PathBuf, FileChange>) -> Vec<String> {
// Build a concise, human-readable summary list similar to the
// `git status` short format so the user can reason about the
@@ -573,23 +778,119 @@ fn create_diff_summary(changes: HashMap<PathBuf, FileChange>) -> Vec<String> {
summaries
}
fn format_mcp_invocation<'a>(invocation: McpInvocation) -> Line<'a> {
let args_str = invocation
.arguments
.as_ref()
.map(|v| {
// Use compact form to keep things short but readable.
serde_json::to_string(v).unwrap_or_else(|_| v.to_string())
})
.unwrap_or_default();
// -------------------------------------
// Helper types for image rendering
// -------------------------------------
let invocation_spans = vec![
Span::styled(invocation.server.clone(), Style::default().fg(Color::Blue)),
Span::raw("."),
Span::styled(invocation.tool.clone(), Style::default().fg(Color::Blue)),
Span::raw("("),
Span::styled(args_str, Style::default().fg(Color::Gray)),
Span::raw(")"),
];
Line::from(invocation_spans)
/// Cached information for rendering an image inside a conversation cell.
///
/// The cache ties the resized image to a *specific* content width (in
/// terminal cells). Whenever the terminal is resized and the width changes
/// we need to re-compute the scaled variant so that it still fits the
/// available space. Keeping the resized copy around saves a costly rescale
/// between the back-to-back `height()` and `render_window()` calls that the
/// scroll-view performs while laying out the UI.
pub(crate) struct ImageRenderCache {
/// Width in *terminal cells* the cached image was generated for.
width_cells: u16,
/// Height in *terminal rows* that the conversation cell must occupy so
/// the whole image becomes visible.
height_rows: usize,
/// The resized image that fits the given width / height constraints.
scaled_image: DynamicImage,
}
lazy_static! {
static ref TERMINAL_PICKER: ratatui_image::picker::Picker = {
use ratatui_image::picker::Picker;
use ratatui_image::picker::cap_parser::QueryStdioOptions;
// Ask the terminal for capabilities and explicit font size. Request the
// Kitty *text-sizing protocol* as a fallback mechanism for terminals
// (like iTerm2) that do not reply to the standard CSI 16/18 queries.
match Picker::from_query_stdio_with_options(QueryStdioOptions {
text_sizing_protocol: true,
}) {
Ok(picker) => picker,
Err(err) => {
// Fall back to the conservative default that assumes ~8×16 px cells.
// Still better than breaking the build in a headless test run.
tracing::warn!("terminal capability query failed: {err:?}; using default font size");
Picker::from_fontsize((8, 16))
}
}
};
}
/// Resize `image` to fit into `width_cells` columns × 10 rows, keeping the original aspect
/// ratio. The function updates `render_cache` and returns the number of rows
/// (<= 10) the picture will occupy.
fn ensure_image_cache(
image: &DynamicImage,
width_cells: u16,
render_cache: &std::cell::RefCell<Option<ImageRenderCache>>,
) -> usize {
if let Some(cache) = render_cache.borrow().as_ref() {
if cache.width_cells == width_cells {
return cache.height_rows;
}
}
let picker = &*TERMINAL_PICKER;
let (char_w_px, char_h_px) = picker.font_size();
// Heuristic to compensate for Hi-DPI terminals (iTerm2 on Retina Mac) that
// report logical pixels (≈ 8×16) while the iTerm2 graphics protocol
// expects *device* pixels. Empirically the device-pixel-ratio is almost
// always 2 on macOS Retina panels.
let hidpi_scale = if picker.protocol_type() == ProtocolType::Iterm2 {
2.0f64
} else {
1.0
};
// The fallback Halfblocks protocol encodes two pixel rows per cell, so each
// terminal *row* represents only half the (possibly scaled) font height.
let effective_char_h_px: f64 = if picker.protocol_type() == ProtocolType::Halfblocks {
(char_h_px as f64) * hidpi_scale / 2.0
} else {
(char_h_px as f64) * hidpi_scale
};
let char_w_px_f64 = (char_w_px as f64) * hidpi_scale;
const MAX_ROWS: f64 = 10.0;
let max_height_px: f64 = effective_char_h_px * MAX_ROWS;
let (orig_w_px, orig_h_px) = {
let (w, h) = image.dimensions();
(w as f64, h as f64)
};
if orig_w_px == 0.0 || orig_h_px == 0.0 || width_cells == 0 {
*render_cache.borrow_mut() = None;
return 0;
}
let max_w_px = char_w_px_f64 * width_cells as f64;
let scale_w = max_w_px / orig_w_px;
let scale_h = max_height_px / orig_h_px;
let scale = scale_w.min(scale_h).min(1.0);
use image::imageops::FilterType;
let scaled_w_px = (orig_w_px * scale).round().max(1.0) as u32;
let scaled_h_px = (orig_h_px * scale).round().max(1.0) as u32;
let scaled_image = image.resize(scaled_w_px, scaled_h_px, FilterType::Lanczos3);
let height_rows = ((scaled_h_px as f64 / effective_char_h_px).ceil()) as usize;
let new_cache = ImageRenderCache {
width_cells,
height_rows,
scaled_image,
};
*render_cache.borrow_mut() = Some(new_cache);
height_rows
}
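For intuition, a minimal sketch of the scaling arithmetic above under assumed values (logical 8×16 px font on an iTerm2/Retina terminal, so `hidpi_scale` doubles it; 80-cell width; a 1024×768 source image); all numbers are illustrative:

```
fn main() {
    // Assumed: logical font 8×16 px, hidpi_scale = 2.0 → effective cell 16×32 px.
    let (char_w_px, char_h_px) = (8.0_f64 * 2.0, 16.0_f64 * 2.0);
    let (max_w_px, max_h_px) = (char_w_px * 80.0, char_h_px * 10.0); // 1280 px, 320 px
    let (orig_w, orig_h) = (1024.0_f64, 768.0_f64);
    // Height is the binding constraint here: 320 / 768 ≈ 0.4167 < 1280 / 1024.
    let scale = (max_w_px / orig_w).min(max_h_px / orig_h).min(1.0);
    let height_rows = ((orig_h * scale).round() / char_h_px).ceil() as usize;
    assert_eq!(height_rows, 10); // the image fills the full 10-row budget
}
```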

View File

@@ -21,7 +21,7 @@ use ratatui::text::Line;
use ratatui::text::Span;
/// Insert `lines` above the viewport.
pub(crate) fn insert_history_lines(terminal: &mut tui::Tui, lines: Vec<Line>) {
pub(crate) fn insert_history_lines(terminal: &mut tui::Tui, lines: Vec<Line<'static>>) {
let screen_size = terminal.backend().size().unwrap_or(Size::new(0, 0));
let mut area = terminal.get_frame().area();

View File

@@ -6,14 +6,17 @@ use app::App;
use codex_core::config::Config;
use codex_core::config::ConfigOverrides;
use codex_core::config_types::SandboxMode;
use codex_core::openai_api_key::OPENAI_API_KEY_ENV_VAR;
use codex_core::openai_api_key::get_openai_api_key;
use codex_core::openai_api_key::set_openai_api_key;
use codex_core::protocol::AskForApproval;
use codex_core::util::is_inside_git_repo;
use codex_login::load_auth;
use codex_core::util::maybe_read_file;
use codex_login::try_read_openai_api_key;
use log_layer::TuiLogLayer;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use tracing::error;
use tracing_appender::non_blocking;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::prelude::*;
@@ -22,9 +25,11 @@ mod app;
mod app_event;
mod app_event_sender;
mod bottom_pane;
mod cell_widget;
mod chatwidget;
mod citation_regex;
mod cli;
mod conversation_history_widget;
mod exec_command;
mod file_search;
mod get_git_diff;
@@ -32,7 +37,9 @@ mod git_warning_screen;
mod history_cell;
mod insert_history;
mod log_layer;
mod login_screen;
mod markdown;
mod scroll_event_helper;
mod slash_command;
mod status_indicator_widget;
mod text_block;
@@ -42,7 +49,7 @@ mod user_approval_widget;
pub use cli::Cli;
pub async fn run_main(
pub fn run_main(
cli: Cli,
codex_linux_sandbox_exe: Option<PathBuf>,
) -> std::io::Result<codex_core::protocol::TokenUsage> {
@@ -63,8 +70,50 @@ pub async fn run_main(
)
};
let config = {
// Capture any read error for experimental instructions so we can log it
// after the tracing subscriber has been initialized.
let mut experimental_read_error: Option<String> = None;
let (config, experimental_prompt_label) = {
// Load configuration and support CLI overrides.
// If the experimental instructions flag points at a file, read its
// contents; otherwise use the value verbatim. Avoid printing to stdout
// or stderr in this library crate; fall back to the raw string on
// errors.
let base_instructions =
cli.experimental_instructions
.as_deref()
.and_then(|s| match maybe_read_file(s) {
Ok(v) => v,
Err(e) => {
experimental_read_error = Some(format!(
"Failed to read experimental instructions from '{s}': {e}"
));
Some(s.to_string())
}
});
// Derive a label shown in the welcome banner describing the origin of
// the experimental instructions: filename for file paths and
// "experimental" for literals.
let experimental_prompt_label = cli.experimental_instructions.as_deref().map(|s| {
let p = Path::new(s);
if p.is_file() {
p.file_name()
.map(|os| os.to_string_lossy().to_string())
.unwrap_or_else(|| s.to_string())
} else {
"experimental".to_string()
}
});
// Do not show a label if the file was empty (base_instructions is None).
let experimental_prompt_label = if base_instructions.is_some() {
experimental_prompt_label
} else {
None
};
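// Example (illustrative values): pointing the flag at ./notes.md yields the
// label "notes.md"; passing a literal string yields "experimental"; an empty
// or whitespace-only file yields no label at all.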
let overrides = ConfigOverrides {
model: cli.model.clone(),
approval_policy,
@@ -73,8 +122,7 @@ pub async fn run_main(
model_provider: None,
config_profile: cli.config_profile.clone(),
codex_linux_sandbox_exe,
base_instructions: None,
include_plan_tool: None,
base_instructions,
};
// Parse `-c` overrides from the CLI.
let cli_kv_overrides = match cli.config_overrides.parse_overrides() {
@@ -88,7 +136,7 @@ pub async fn run_main(
#[allow(clippy::print_stderr)]
match Config::load_with_cli_overrides(cli_kv_overrides, overrides) {
Ok(config) => config,
Ok(config) => (config, experimental_prompt_label),
Err(err) => {
eprintln!("Error loading configuration: {err}");
std::process::exit(1);
@@ -138,38 +186,37 @@ pub async fn run_main(
.with(tui_layer)
.try_init();
let show_login_screen = should_show_login_screen(&config);
if show_login_screen {
std::io::stdout()
.write_all(b"No API key detected.\nLogin with your ChatGPT account? [Yn] ")?;
std::io::stdout().flush()?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
let trimmed = input.trim();
if !(trimmed.is_empty() || trimmed.eq_ignore_ascii_case("y")) {
std::process::exit(1);
}
// Spawn a task to run the login command.
// Block until the login command is finished.
codex_login::login_with_chatgpt(&config.codex_home, false).await?;
std::io::stdout().write_all(b"Login successful.\n")?;
if let Some(msg) = experimental_read_error {
// Now that logging is initialized, record a warning so the user
// can see that Codex fell back to using the literal string.
tracing::warn!("{msg}");
}
let show_login_screen = should_show_login_screen(&config);
// Determine whether we need to display the "not a git repo" warning
// modal. The modal is shown when the current working directory is *not*
// inside a Git repository **and** the user did *not* pass the
// `--allow-no-git-exec` flag.
let show_git_warning = !cli.skip_git_repo_check && !is_inside_git_repo(&config);
run_ratatui_app(cli, config, show_git_warning, log_rx)
.map_err(|err| std::io::Error::other(err.to_string()))
run_ratatui_app(
cli,
config,
show_login_screen,
show_git_warning,
experimental_prompt_label,
log_rx,
)
.map_err(|err| std::io::Error::other(err.to_string()))
}
fn run_ratatui_app(
cli: Cli,
config: Config,
show_login_screen: bool,
show_git_warning: bool,
experimental_prompt_label: Option<String>,
mut log_rx: tokio::sync::mpsc::UnboundedReceiver<String>,
) -> color_eyre::Result<codex_core::protocol::TokenUsage> {
color_eyre::install()?;
@@ -183,7 +230,14 @@ fn run_ratatui_app(
terminal.clear()?;
let Cli { prompt, images, .. } = cli;
let mut app = App::new(config.clone(), prompt, show_git_warning, images);
let mut app = App::new(
config.clone(),
prompt,
show_login_screen,
show_git_warning,
images,
experimental_prompt_label,
);
// Bridge log receiver into the AppEvent channel so latest log lines update the UI.
{
@@ -217,19 +271,88 @@ fn restore() {
#[allow(clippy::unwrap_used)]
fn should_show_login_screen(config: &Config) -> bool {
if config.model_provider.requires_auth {
if is_in_need_of_openai_api_key(config) {
// Reading the OpenAI API key is an async operation because it may need
// to refresh the token. Block on it.
let codex_home = config.codex_home.clone();
match load_auth(&codex_home) {
Ok(Some(_)) => false,
Ok(None) => true,
Err(err) => {
error!("Failed to read auth.json: {err}");
true
}
}
let (tx, rx) = tokio::sync::oneshot::channel();
tokio::spawn(async move {
match try_read_openai_api_key(&codex_home).await {
Ok(openai_api_key) => {
set_openai_api_key(openai_api_key);
tx.send(false).unwrap();
}
Err(_) => {
tx.send(true).unwrap();
}
}
});
// TODO(mbolin): Impose some sort of timeout.
tokio::task::block_in_place(|| rx.blocking_recv()).unwrap()
} else {
false
}
}
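The block above bridges an async credential read into this synchronous function; a minimal standalone sketch of the same oneshot-plus-`block_in_place` pattern (names are placeholders, and a multi-thread Tokio runtime is assumed, since `block_in_place` panics on a current-thread runtime):

```
use tokio::sync::oneshot;

// Returns true when login is still needed; the async lookup is a stand-in
// for try_read_openai_api_key().
fn needs_login() -> bool {
    let (tx, rx) = oneshot::channel();
    tokio::spawn(async move {
        let found_key = false; // pretend the async read found nothing
        let _ = tx.send(!found_key);
    });
    // Park this (non-async) caller until the spawned task answers.
    tokio::task::block_in_place(|| rx.blocking_recv()).unwrap_or(true)
}
```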
fn is_in_need_of_openai_api_key(config: &Config) -> bool {
let is_using_openai_key = config
.model_provider
.env_key
.as_ref()
.map(|s| s == OPENAI_API_KEY_ENV_VAR)
.unwrap_or(false);
is_using_openai_key && get_openai_api_key().is_none()
}
#[cfg(test)]
mod tests {
use codex_core::util::maybe_read_file;
use std::fs;
use std::path::PathBuf;
use uuid::Uuid;
fn temp_path() -> PathBuf {
let mut p = std::env::temp_dir();
p.push(format!("codex_tui_test_{}.txt", Uuid::new_v4()));
p
}
#[test]
fn maybe_read_file_returns_literal_for_non_path() {
let res = match maybe_read_file("Base instructions as a string") {
Ok(v) => v,
Err(e) => panic!("error: {e}"),
};
assert_eq!(res, Some("Base instructions as a string".to_string()));
}
#[test]
fn maybe_read_file_reads_and_trims_file_contents() {
let p = temp_path();
if let Err(e) = fs::write(&p, " file text \n") {
panic!("write temp file: {e}");
}
let p_s = p.to_string_lossy().to_string();
let res = match maybe_read_file(&p_s) {
Ok(v) => v,
Err(e) => panic!("error: {e}"),
};
assert_eq!(res, Some("file text".to_string()));
let _ = std::fs::remove_file(&p);
}
#[test]
fn maybe_read_file_empty_file_returns_none() {
let p = temp_path();
if let Err(e) = fs::write(&p, " \n\t") {
panic!("write temp file: {e}");
}
let p_s = p.to_string_lossy().to_string();
let res = match maybe_read_file(&p_s) {
Ok(v) => v,
Err(e) => panic!("error: {e}"),
};
assert_eq!(res, None);
let _ = std::fs::remove_file(&p);
}
}

View File

@@ -0,0 +1,46 @@
use std::path::PathBuf;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use ratatui::buffer::Buffer;
use ratatui::layout::Rect;
use ratatui::widgets::Paragraph;
use ratatui::widgets::Widget as _;
use ratatui::widgets::WidgetRef;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
pub(crate) struct LoginScreen {
app_event_tx: AppEventSender,
/// Use this with login_with_chatgpt() in login/src/lib.rs and, if
/// successful, update the in-memory config via
/// codex_core::openai_api_key::set_openai_api_key().
#[allow(dead_code)]
codex_home: PathBuf,
}
impl LoginScreen {
pub(crate) fn new(app_event_tx: AppEventSender, codex_home: PathBuf) -> Self {
Self {
app_event_tx,
codex_home,
}
}
pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) {
if let KeyCode::Char('q') = key_event.code {
self.app_event_tx.send(AppEvent::ExitRequest);
}
}
}
impl WidgetRef for &LoginScreen {
fn render_ref(&self, area: Rect, buf: &mut Buffer) {
let text = Paragraph::new(
"Login using `codex login` and then run this command again. 'q' to quit.",
);
text.render(area, buf);
}
}

View File

@@ -21,7 +21,7 @@ fn main() -> anyhow::Result<()> {
.config_overrides
.raw_overrides
.splice(0..0, top_cli.config_overrides.raw_overrides);
let usage = run_main(inner, codex_linux_sandbox_exe).await?;
let usage = run_main(inner, codex_linux_sandbox_exe)?;
println!("{}", codex_core::protocol::FinalOutput::from(usage));
Ok(())
})

View File

@@ -0,0 +1,77 @@
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicI32;
use std::sync::atomic::Ordering;
use tokio::runtime::Handle;
use tokio::time::Duration;
use tokio::time::sleep;
use crate::app_event::AppEvent;
use crate::app_event_sender::AppEventSender;
pub(crate) struct ScrollEventHelper {
app_event_tx: AppEventSender,
scroll_delta: Arc<AtomicI32>,
timer_scheduled: Arc<AtomicBool>,
runtime: Handle,
}
/// How long to wait after the first scroll event before sending the
/// accumulated scroll delta to the main thread.
const DEBOUNCE_WINDOW: Duration = Duration::from_millis(100);
/// Utility to debounce scroll events so we can determine the **magnitude** of
/// each scroll burst by accumulating individual wheel events over a short
/// window. The debounce timer now runs on Tokio so we avoid spinning up a new
/// operating-system thread for every burst.
impl ScrollEventHelper {
pub(crate) fn new(app_event_tx: AppEventSender) -> Self {
Self {
app_event_tx,
scroll_delta: Arc::new(AtomicI32::new(0)),
timer_scheduled: Arc::new(AtomicBool::new(false)),
runtime: Handle::current(),
}
}
pub(crate) fn scroll_up(&self) {
self.scroll_delta.fetch_sub(1, Ordering::Relaxed);
self.schedule_notification();
}
pub(crate) fn scroll_down(&self) {
self.scroll_delta.fetch_add(1, Ordering::Relaxed);
self.schedule_notification();
}
/// Starts a one-shot timer **only once** per burst of wheel events.
fn schedule_notification(&self) {
// If the timer is already scheduled, do nothing.
if self
.timer_scheduled
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.is_err()
{
return;
}
// Otherwise, schedule a new timer.
let tx = self.app_event_tx.clone();
let delta = Arc::clone(&self.scroll_delta);
let timer_flag = Arc::clone(&self.timer_scheduled);
// Use self.runtime instead of tokio::spawn() because the calling thread
// in app.rs is not part of the Tokio runtime: it is a plain OS thread.
self.runtime.spawn(async move {
sleep(DEBOUNCE_WINDOW).await;
let accumulated = delta.swap(0, Ordering::SeqCst);
if accumulated != 0 {
tx.send(AppEvent::Scroll(accumulated));
}
timer_flag.store(false, Ordering::SeqCst);
});
}
}
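A usage sketch (illustrative; it assumes the crossterm mouse events the TUI already consumes, and a `ScrollEventHelper` constructed where `Handle::current()` can resolve a runtime):

```
use crossterm::event::{MouseEvent, MouseEventKind};

// Forward raw wheel events into the helper; after the 100 ms debounce window
// a single AppEvent::Scroll(net_delta) is delivered for the whole burst.
fn on_mouse_event(helper: &ScrollEventHelper, ev: MouseEvent) {
    match ev.kind {
        MouseEventKind::ScrollUp => helper.scroll_up(),
        MouseEventKind::ScrollDown => helper.scroll_down(),
        _ => {}
    }
}
```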

View File

@@ -13,7 +13,6 @@ pub enum SlashCommand {
// DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so
// more frequently used commands should be listed first.
New,
Compact,
Diff,
Quit,
}
@@ -23,9 +22,6 @@ impl SlashCommand {
pub fn description(self) -> &'static str {
match self {
SlashCommand::New => "Start a new chat.",
SlashCommand::Compact => {
"Summarize and compact the current conversation to free up context."
}
SlashCommand::Quit => "Exit the application.",
SlashCommand::Diff => {
"Show git diff of the working directory (including untracked files)"
@@ -44,58 +40,3 @@ impl SlashCommand {
pub fn built_in_slash_commands() -> Vec<(&'static str, SlashCommand)> {
SlashCommand::iter().map(|c| (c.command(), c)).collect()
}
#[cfg(test)]
mod tests {
use crate::app_event_sender::AppEventSender;
use crate::bottom_pane::chat_composer::ChatComposer;
use crossterm::event::KeyCode;
use insta::assert_snapshot;
use ratatui::Terminal;
use ratatui::backend::TestBackend;
use std::sync::mpsc;
#[test]
fn test_slash_commands() {
let (tx, _rx) = mpsc::channel();
let sender = AppEventSender::new(tx);
let mut composer = ChatComposer::new(true, sender);
let mut terminal = match Terminal::new(TestBackend::new(100, 10)) {
Ok(t) => t,
Err(e) => panic!("Failed to create terminal: {e}"),
};
// Initial empty state
if let Err(e) = terminal.draw(|f| f.render_widget_ref(&composer, f.area())) {
panic!("Failed to draw empty composer: {e}");
}
assert_snapshot!("empty_slash", terminal.backend());
// Type slash to show commands
let _ = composer.handle_key_event(crossterm::event::KeyEvent::new(
KeyCode::Char('/'),
crossterm::event::KeyModifiers::empty(),
));
if let Err(e) = terminal.draw(|f| f.render_widget_ref(&composer, f.area())) {
panic!("Failed to draw slash commands: {e}");
}
assert_snapshot!("slash_commands", terminal.backend());
// Type 'c' to filter to compact
let _ = composer.handle_key_event(crossterm::event::KeyEvent::new(
KeyCode::Char('c'),
crossterm::event::KeyModifiers::empty(),
));
if let Err(e) = terminal.draw(|f| f.render_widget_ref(&composer, f.area())) {
panic!("Failed to draw filtered commands: {e}");
}
assert_snapshot!("compact_filtered", terminal.backend());
// Select compact command - we don't check the final state since it's handled by the app layer
let _ = composer.handle_key_event(crossterm::event::KeyEvent::new(
KeyCode::Enter,
crossterm::event::KeyModifiers::empty(),
));
}
}

View File

@@ -1,14 +0,0 @@
---
source: tui/src/slash_command.rs
expression: terminal.backend()
---
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮"
"│/compact Summarize and compact the current conversation to free up context. │"
"╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮"
"│/c │"
"│ │"
"│ │"
"│ │"
"│ │"
"╰───────────────────────────────────────────────Enter to send | Ctrl+D to quit | Ctrl+J for newline╯"

View File

@@ -1,14 +0,0 @@
---
source: tui/src/slash_command.rs
expression: terminal.backend()
---
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮"
"│ send a message │"
"│ │"
"│ │"
"│ │"
"│ │"
"│ │"
"│ │"
"│ │"
"╰───────────────────────────────────────────────Enter to send | Ctrl+D to quit | Ctrl+J for newline╯"

View File

@@ -1,14 +0,0 @@
---
source: tui/src/slash_command.rs
expression: terminal.backend()
---
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮"
"│/new Start a new chat. │"
"│/compact Summarize and compact the current conversation to free up context. │"
"│/diff Show git diff of the working directory (including untracked files) │"
"│/quit Exit the application. │"
"│/toggle-mouse-mode Toggle mouse mode (enable for scrolling, disable for text selection) │"
"╰──────────────────────────────────────────────────────────────────────────────────────────────────╯"
"╭──────────────────────────────────────────────────────────────────────────────────────────────────╮"
"│/ │"
"╰───────────────────────────────────────────────Enter to send | Ctrl+D to quit | Ctrl+J for newline╯"

View File

@@ -1,3 +1,4 @@
use crate::cell_widget::CellWidget;
use ratatui::prelude::*;
/// A simple widget that just displays a list of `Line`s via a `Paragraph`.
@@ -12,3 +13,20 @@ impl TextBlock {
Self { lines }
}
}
impl CellWidget for TextBlock {
fn height(&self, width: u16) -> usize {
// Use the same wrapping configuration as ConversationHistoryWidget so
// measurement stays in sync with rendering.
ratatui::widgets::Paragraph::new(self.lines.clone())
.wrap(crate::conversation_history_widget::wrap_cfg())
.line_count(width)
}
fn render_window(&self, first_visible_line: usize, area: Rect, buf: &mut Buffer) {
ratatui::widgets::Paragraph::new(self.lines.clone())
.wrap(crate::conversation_history_widget::wrap_cfg())
.scroll((first_visible_line as u16, 0))
.render(area, buf);
}
}
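One note on the `CellWidget` contract (a sketch under the assumption that `TextBlock::new` and `wrap_cfg()` are as defined above): the scroll-view trusts `height()` when slicing windows, so measurement and rendering must wrap identically.

```
// Illustrative only: a long line that wraps at narrow widths.
let block = TextBlock::new(vec![Line::from("word ".repeat(40))]);
let rows = block.height(20);
// render_window() must be handed exactly `rows` rows for this cell; if it
// wrapped differently (say, a divergent Wrap config), the last row would be
// clipped or padded. Sharing wrap_cfg() rules that mismatch out.
```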