mirror of
https://github.com/openai/codex.git
synced 2026-04-26 15:45:02 +00:00
Refactor cloud requirements error and surface in JSON-RPC error (#14504)
Refactors cloud requirements error handling to carry structured error metadata and surfaces that metadata through JSON-RPC config-load failures. Specifically, this change: (1) adds typed CloudRequirementsLoadErrorCode values plus an optional statusCode; and (2) marks thread/start, thread/resume, and thread/fork config failures with structured cloud-requirements error data.
This commit is contained in:
@@ -1,4 +1,5 @@
|
||||
use anyhow::Result;
|
||||
use app_test_support::ChatGptAuthFixture;
|
||||
use app_test_support::McpProcess;
|
||||
use app_test_support::create_apply_patch_sse_response;
|
||||
use app_test_support::create_fake_rollout_with_text_elements;
|
||||
@@ -8,6 +9,7 @@ use app_test_support::create_mock_responses_server_sequence_unchecked;
|
||||
use app_test_support::create_shell_command_sse_response;
|
||||
use app_test_support::rollout_path;
|
||||
use app_test_support::to_response;
|
||||
use app_test_support::write_chatgpt_auth;
|
||||
use chrono::Utc;
|
||||
use codex_app_server_protocol::AskForApproval;
|
||||
use codex_app_server_protocol::CommandExecutionApprovalDecision;
|
||||
@@ -36,6 +38,8 @@ use codex_app_server_protocol::TurnStartParams;
|
||||
use codex_app_server_protocol::TurnStartResponse;
|
||||
use codex_app_server_protocol::TurnStatus;
|
||||
use codex_app_server_protocol::UserInput;
|
||||
use codex_core::auth::AuthCredentialsStoreMode;
|
||||
use codex_core::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
|
||||
use codex_protocol::ThreadId;
|
||||
use codex_protocol::config_types::Personality;
|
||||
use codex_protocol::models::ContentItem;
|
||||
@@ -60,6 +64,11 @@ use std::process::Command;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::timeout;
|
||||
use uuid::Uuid;
|
||||
use wiremock::Mock;
|
||||
use wiremock::MockServer;
|
||||
use wiremock::ResponseTemplate;
|
||||
use wiremock::matchers::method;
|
||||
use wiremock::matchers::path;
|
||||
|
||||
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
|
||||
const CODEX_5_2_INSTRUCTIONS_TEMPLATE_DEFAULT: &str = "You are Codex, a coding agent based on GPT-5. You and the user share the same workspace and collaborate to achieve the user's goals.";
|
||||
@@ -1409,6 +1418,98 @@ async fn thread_resume_fails_when_required_mcp_server_fails_to_initialize() -> R
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_resume_surfaces_cloud_requirements_load_errors() -> Result<()> {
|
||||
let server = MockServer::start().await;
|
||||
Mock::given(method("GET"))
|
||||
.and(path("/backend-api/wham/config/requirements"))
|
||||
.respond_with(
|
||||
ResponseTemplate::new(401)
|
||||
.insert_header("content-type", "text/html")
|
||||
.set_body_string("<html>nope</html>"),
|
||||
)
|
||||
.mount(&server)
|
||||
.await;
|
||||
Mock::given(method("POST"))
|
||||
.and(path("/oauth/token"))
|
||||
.respond_with(ResponseTemplate::new(401).set_body_json(json!({
|
||||
"error": { "code": "refresh_token_invalidated" }
|
||||
})))
|
||||
.mount(&server)
|
||||
.await;
|
||||
|
||||
let codex_home = TempDir::new()?;
|
||||
let model_server = create_mock_responses_server_repeating_assistant("Done").await;
|
||||
let chatgpt_base_url = format!("{}/backend-api", server.uri());
|
||||
create_config_toml_with_chatgpt_base_url(
|
||||
codex_home.path(),
|
||||
&model_server.uri(),
|
||||
&chatgpt_base_url,
|
||||
)?;
|
||||
write_chatgpt_auth(
|
||||
codex_home.path(),
|
||||
ChatGptAuthFixture::new("chatgpt-token")
|
||||
.refresh_token("stale-refresh-token")
|
||||
.plan_type("business")
|
||||
.chatgpt_user_id("user-123")
|
||||
.chatgpt_account_id("account-123")
|
||||
.account_id("account-123"),
|
||||
AuthCredentialsStoreMode::File,
|
||||
)?;
|
||||
let conversation_id = create_fake_rollout_with_text_elements(
|
||||
codex_home.path(),
|
||||
"2025-01-05T12-00-00",
|
||||
"2025-01-05T12:00:00Z",
|
||||
"Saved user message",
|
||||
Vec::new(),
|
||||
Some("mock_provider"),
|
||||
None,
|
||||
)?;
|
||||
let refresh_token_url = format!("{}/oauth/token", server.uri());
|
||||
let mut mcp = McpProcess::new_with_env(
|
||||
codex_home.path(),
|
||||
&[
|
||||
("OPENAI_API_KEY", None),
|
||||
(
|
||||
REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR,
|
||||
Some(refresh_token_url.as_str()),
|
||||
),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
|
||||
|
||||
let resume_id = mcp
|
||||
.send_thread_resume_request(ThreadResumeParams {
|
||||
thread_id: conversation_id,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
let err: JSONRPCError = timeout(
|
||||
DEFAULT_READ_TIMEOUT,
|
||||
mcp.read_stream_until_error_message(RequestId::Integer(resume_id)),
|
||||
)
|
||||
.await??;
|
||||
|
||||
assert!(
|
||||
err.error.message.contains("failed to load configuration"),
|
||||
"unexpected error message: {}",
|
||||
err.error.message
|
||||
);
|
||||
assert_eq!(
|
||||
err.error.data,
|
||||
Some(json!({
|
||||
"reason": "cloudRequirements",
|
||||
"errorCode": "Auth",
|
||||
"action": "relogin",
|
||||
"statusCode": 401,
|
||||
"detail": "Your access token could not be refreshed because your refresh token was revoked. Please log out and sign in again.",
|
||||
}))
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn thread_resume_prefers_path_over_thread_id() -> Result<()> {
|
||||
let server = create_mock_responses_server_repeating_assistant("Done").await;
|
||||
@@ -1734,6 +1835,37 @@ stream_max_retries = 0
|
||||
)
|
||||
}
|
||||
|
||||
/// Writes a test `config.toml` into `codex_home` that points the mock model
/// provider at `server_uri` and the ChatGPT backend at `chatgpt_base_url`.
fn create_config_toml_with_chatgpt_base_url(
    codex_home: &std::path::Path,
    server_uri: &str,
    chatgpt_base_url: &str,
) -> std::io::Result<()> {
    // Raw string is interpolated via implicit `format!` captures of the
    // `chatgpt_base_url` and `server_uri` parameters.
    let contents = format!(
        r#"
model = "gpt-5.2-codex"
approval_policy = "never"
sandbox_mode = "read-only"
chatgpt_base_url = "{chatgpt_base_url}"

model_provider = "mock_provider"

[features]
personality = true

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}
|
||||
|
||||
fn create_config_toml_with_required_broken_mcp(
|
||||
codex_home: &std::path::Path,
|
||||
server_uri: &str,
|
||||
|
||||
Reference in New Issue
Block a user