Enforce user input length cap (#12823)

Currently there is no bound on the length of a user message submitted through
the TUI or the app server interface. This means a user can paste many
megabytes of text, which can lead to poor performance, hangs, and crashes. In
extreme cases, it can even trigger a [kernel
panic](https://github.com/openai/codex/issues/12323).

This PR limits the length of a user input to 2**20 (about one million)
characters. This value was chosen because it already fills the entire context
window on the latest models, so accepting longer inputs wouldn't make sense
anyway.
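
For reference, the cap boils down to a single shared constant. A minimal sketch of its definition, assuming a `user_input` module in codex-protocol (the module path is inferred from the test imports in the diff below):

```rust
// codex-protocol, user_input module (path inferred from the test imports below).
// 2**20 = 1,048,576 characters, counted as Unicode scalar values rather than
// bytes (the tests below measure length with `.chars().count()`).
pub const MAX_USER_INPUT_TEXT_CHARS: usize = 1 << 20;
```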

Summary
- add a shared `MAX_USER_INPUT_TEXT_CHARS` constant in codex-protocol
and surface it in TUI and app server code
- block oversized submissions in the TUI submit flow and emit error
history cells when validation fails
- reject oversized app-server requests with JSON-RPC error `-32602` (invalid
params) plus structured `input_too_large` data, and document the behavior
(a sketch of the validation follows this list)
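
A minimal, self-contained sketch of the app-server check, assuming the limit applies to the combined character count of all text items in a turn (which is what the tests below exercise). The helper and error-shape names here are made up for illustration, and `"input_too_large"` is an assumed value for `INPUT_TOO_LARGE_ERROR_CODE`:

```rust
use serde_json::Value;
use serde_json::json;

// Shared limit from codex-protocol (2**20 characters).
const MAX_USER_INPUT_TEXT_CHARS: usize = 1 << 20;

// Hypothetical error shape for this sketch; the real app server uses its own
// JSON-RPC error types.
struct RpcError {
    code: i64,
    message: String,
    data: Value,
}

// Sums character counts (not bytes) across every text item in the turn;
// mention items do not count toward the limit.
fn validate_input_length(text_items: &[&str]) -> Result<(), RpcError> {
    let actual_chars: usize = text_items.iter().map(|t| t.chars().count()).sum();
    if actual_chars <= MAX_USER_INPUT_TEXT_CHARS {
        return Ok(());
    }
    Err(RpcError {
        code: -32602, // JSON-RPC "invalid params"
        message: format!(
            "Input exceeds the maximum length of {MAX_USER_INPUT_TEXT_CHARS} characters."
        ),
        data: json!({
            "input_error_code": "input_too_large", // assumed value of INPUT_TOO_LARGE_ERROR_CODE
            "max_chars": MAX_USER_INPUT_TEXT_CHARS,
            "actual_chars": actual_chars,
        }),
    })
}
```

Because the request is rejected before a turn is created, no `turn/started` notification should be emitted; the second test below asserts exactly that.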

Testing
- ran the IDE extension with this change and verified that pasting a user
message several MB long now reports an error instead of crashing or making
my computer hot.
Author: Eric Traut
Date: 2026-02-25 22:23:51 -08:00
Committed by: GitHub
Parent: 9a96b6f509
Commit: 28bfbb8f2b
13 changed files with 676 additions and 0 deletions

@@ -9,6 +9,8 @@ use app_test_support::create_mock_responses_server_sequence_unchecked;
use app_test_support::create_shell_command_sse_response;
use app_test_support::format_with_current_shell_display;
use app_test_support::to_response;
use codex_app_server::INPUT_TOO_LARGE_ERROR_CODE;
use codex_app_server::INVALID_PARAMS_ERROR_CODE;
use codex_app_server_protocol::ByteRange;
use codex_app_server_protocol::ClientInfo;
use codex_app_server_protocol::CommandExecutionApprovalDecision;
@@ -19,6 +21,7 @@ use codex_app_server_protocol::FileChangeOutputDeltaNotification;
use codex_app_server_protocol::FileChangeRequestApprovalResponse;
use codex_app_server_protocol::ItemCompletedNotification;
use codex_app_server_protocol::ItemStartedNotification;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCNotification;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::PatchApplyStatus;
@@ -45,6 +48,7 @@ use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::user_input::MAX_USER_INPUT_TEXT_CHARS;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
@@ -223,6 +227,143 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
    Ok(())
}

#[tokio::test]
async fn turn_start_accepts_text_at_limit_with_mention_item() -> Result<()> {
    let responses = vec![create_final_assistant_message_sse_response("Done")?];
    let server = create_mock_responses_server_sequence_unchecked(responses).await;
    let codex_home = TempDir::new()?;
    create_config_toml(
        codex_home.path(),
        &server.uri(),
        "never",
        &BTreeMap::from([(Feature::Personality, true)]),
    )?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id,
            input: vec![
                V2UserInput::Text {
                    text: "x".repeat(MAX_USER_INPUT_TEXT_CHARS),
                    text_elements: Vec::new(),
                },
                V2UserInput::Mention {
                    name: "Demo App".to_string(),
                    path: "app://demo-app".to_string(),
                },
            ],
            ..Default::default()
        })
        .await?;
    let turn_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
    )
    .await??;
    let TurnStartResponse { turn } = to_response::<TurnStartResponse>(turn_resp)?;
    assert_eq!(turn.status, TurnStatus::InProgress);
    timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_notification_message("turn/completed"),
    )
    .await??;
    Ok(())
}

#[tokio::test]
async fn turn_start_rejects_combined_oversized_text_input() -> Result<()> {
    let codex_home = TempDir::new()?;
    create_config_toml(
        codex_home.path(),
        "http://localhost/unused",
        "never",
        &BTreeMap::from([(Feature::Personality, true)]),
    )?;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let thread_req = mcp
        .send_thread_start_request(ThreadStartParams {
            model: Some("mock-model".to_string()),
            ..Default::default()
        })
        .await?;
    let thread_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
    )
    .await??;
    let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
    let first = "x".repeat(MAX_USER_INPUT_TEXT_CHARS / 2);
    let second = "y".repeat(MAX_USER_INPUT_TEXT_CHARS / 2 + 1);
    let actual_chars = first.chars().count() + second.chars().count();
    let turn_req = mcp
        .send_turn_start_request(TurnStartParams {
            thread_id: thread.id,
            input: vec![
                V2UserInput::Text {
                    text: first,
                    text_elements: Vec::new(),
                },
                V2UserInput::Text {
                    text: second,
                    text_elements: Vec::new(),
                },
            ],
            ..Default::default()
        })
        .await?;
    let err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(turn_req)),
    )
    .await??;
    assert_eq!(err.error.code, INVALID_PARAMS_ERROR_CODE);
    assert_eq!(
        err.error.message,
        format!("Input exceeds the maximum length of {MAX_USER_INPUT_TEXT_CHARS} characters.")
    );
    let data = err.error.data.expect("expected structured error data");
    assert_eq!(data["input_error_code"], INPUT_TOO_LARGE_ERROR_CODE);
    assert_eq!(data["max_chars"], MAX_USER_INPUT_TEXT_CHARS);
    assert_eq!(data["actual_chars"], actual_chars);
    let turn_started = tokio::time::timeout(
        std::time::Duration::from_millis(250),
        mcp.read_stream_until_notification_message("turn/started"),
    )
    .await;
    assert!(
        turn_started.is_err(),
        "did not expect a turn/started notification for rejected input"
    );
    Ok(())
}

#[tokio::test]
async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<()> {
    // Provide a mock server and config so model wiring is valid.