Refactor cloud requirements error and surface in JSON-RPC error (#14504)

Refactors cloud requirements error handling to carry structured error
metadata and surfaces that metadata through JSON-RPC config-load
failures, including:
* adds typed CloudRequirementsLoadErrorCode values plus optional
statusCode
* marks thread/start, thread/resume, and thread/fork config failures
with structured cloud-requirements error data
alexsong-oai
2026-03-12 20:30:51 -07:00
committed by GitHub
parent 0daffe667a
commit 650beb177e
9 changed files with 562 additions and 29 deletions
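The diff below adds an integration test asserting the JSON shape of the structured error. The type definitions themselves live in files not excerpted here; purely as a hedged sketch inferred from the test's expected payload (every name below is an assumption, not the commit's actual code), the metadata could be modeled like this:

use serde::Serialize;

// Hypothetical sketch of the typed error code and the structured payload
// attached to JSON-RPC config-load failures. Field names are taken from
// the JSON asserted in the test below; the real definitions may differ.
#[derive(Debug, Serialize)]
enum CloudRequirementsLoadErrorCode {
    Auth,
    // other variants elided; the commit's actual set is not shown here
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct CloudRequirementsErrorData {
    reason: &'static str,                       // "cloudRequirements"
    error_code: CloudRequirementsLoadErrorCode, // serializes as "Auth"
    action: String,                             // e.g. "relogin"
    status_code: Option<u16>,                   // optional HTTP status, e.g. 401
    detail: Option<String>,                     // human-readable explanation
}

fn main() {
    let data = CloudRequirementsErrorData {
        reason: "cloudRequirements",
        error_code: CloudRequirementsLoadErrorCode::Auth,
        action: "relogin".to_string(),
        status_code: Some(401),
        detail: Some("Please log out and sign in again.".to_string()),
    };
    // With serde's camelCase renaming, this prints the same
    // reason/errorCode/action/statusCode/detail object the test expects.
    println!("{}", serde_json::to_string_pretty(&data).unwrap());
}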

@@ -1,7 +1,9 @@
use anyhow::Result;
use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_repeating_assistant;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCMessage;
use codex_app_server_protocol::JSONRPCResponse;
@@ -11,15 +13,23 @@ use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::ThreadStartedNotification;
use codex_app_server_protocol::ThreadStatus;
use codex_app_server_protocol::ThreadStatusChangedNotification;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::auth::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
use codex_core::config::set_project_trust_level;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
use serde_json::Value;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
use wiremock::matchers::method;
use wiremock::matchers::path;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
@@ -318,6 +328,88 @@ async fn thread_start_fails_when_required_mcp_server_fails_to_initialize() -> Result<()> {
    Ok(())
}
#[tokio::test]
async fn thread_start_surfaces_cloud_requirements_load_errors() -> Result<()> {
    let server = MockServer::start().await;
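    // The mock backend rejects the requirements fetch outright with a
    // 401 and a non-JSON body, exercising the cloud-requirements error
    // path rather than a parse failure.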
Mock::given(method("GET"))
.and(path("/backend-api/wham/config/requirements"))
.respond_with(
ResponseTemplate::new(401)
.insert_header("content-type", "text/html")
.set_body_string("<html>nope</html>"),
)
.mount(&server)
.await;
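    // The refresh attempt is rejected as well: the mock reports the
    // refresh token as revoked, matching the detail asserted below.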
Mock::given(method("POST"))
.and(path("/oauth/token"))
.respond_with(ResponseTemplate::new(401).set_body_json(json!({
"error": { "code": "refresh_token_invalidated" }
})))
.mount(&server)
.await;
    let codex_home = TempDir::new()?;
    let model_server = create_mock_responses_server_repeating_assistant("Done").await;
    let chatgpt_base_url = format!("{}/backend-api", server.uri());
    create_config_toml_with_chatgpt_base_url(
        codex_home.path(),
        &model_server.uri(),
        &chatgpt_base_url,
    )?;
    write_chatgpt_auth(
        codex_home.path(),
        ChatGptAuthFixture::new("chatgpt-token")
            .refresh_token("stale-refresh-token")
            .plan_type("business")
            .chatgpt_user_id("user-123")
            .chatgpt_account_id("account-123")
            .account_id("account-123"),
        AuthCredentialsStoreMode::File,
    )?;

    let refresh_token_url = format!("{}/oauth/token", server.uri());
    let mut mcp = McpProcess::new_with_env(
        codex_home.path(),
        &[
            ("OPENAI_API_KEY", None),
            (
                REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR,
                Some(refresh_token_url.as_str()),
            ),
        ],
    )
    .await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let req_id = mcp
        .send_thread_start_request(ThreadStartParams::default())
        .await?;
    let err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(req_id)),
    )
    .await??;

    assert!(
        err.error.message.contains("failed to load configuration"),
        "unexpected error message: {}",
        err.error.message
    );
    assert_eq!(
        err.error.data,
        Some(json!({
            "reason": "cloudRequirements",
            "errorCode": "Auth",
            "action": "relogin",
            "statusCode": 401,
            "detail": "Your access token could not be refreshed because your refresh token was revoked. Please log out and sign in again.",
        }))
    );
    Ok(())
}
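Because the error data is structured, a client can react programmatically instead of string-matching the error message. A minimal sketch, assuming only the payload shape asserted above (the handler and its name are hypothetical):

use serde_json::{Value, json};

// Hypothetical client-side handler keyed off the same fields the test
// asserts above; illustrative only, not part of this commit.
fn handle_config_load_error(data: &Value) {
    if data["reason"] == "cloudRequirements" && data["action"] == "relogin" {
        // The structured payload says re-authentication is required.
        if let Some(detail) = data["detail"].as_str() {
            eprintln!("{detail}");
        }
    }
}

fn main() {
    handle_config_load_error(&json!({
        "reason": "cloudRequirements",
        "errorCode": "Auth",
        "action": "relogin",
        "statusCode": 401,
        "detail": "Please log out and sign in again.",
    }));
}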
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
@@ -342,6 +434,34 @@ stream_max_retries = 0
    )
}
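// Like `create_config_toml`, but additionally points `chatgpt_base_url`
// at the wiremock server so the cloud requirements fetch goes through
// the mock backend above.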
fn create_config_toml_with_chatgpt_base_url(
    codex_home: &Path,
    server_uri: &str,
    chatgpt_base_url: &str,
) -> std::io::Result<()> {
    let config_toml = codex_home.join("config.toml");
    std::fs::write(
        config_toml,
        format!(
            r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
chatgpt_base_url = "{chatgpt_base_url}"
model_provider = "mock_provider"

[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
        ),
    )
}
fn create_config_toml_with_required_broken_mcp(
    codex_home: &Path,
    server_uri: &str,