Compare commits

..

7 Commits

Author SHA1 Message Date
Rahul Thathoo
ce323916de Validate API keys without decoding model catalog 2026-05-11 08:05:59 -07:00
Rahul Thathoo
4ec09d3030 Handle transient API key validation failures 2026-05-10 17:00:52 -07:00
Rahul Thathoo
cf1f255a5f Fix API key account test setup 2026-05-10 10:46:31 -07:00
Rahul Thathoo
c05edf927c Fix rate limit test argument comments 2026-05-10 10:39:45 -07:00
Rahul Thathoo
31be915e4f Fix API key auth status tests 2026-05-10 10:34:50 -07:00
Rahul Thathoo
8f2bc5b958 fix API key auth setup in rate limit tests 2026-05-10 10:26:51 -07:00
Rahul Thathoo
6661caf131 validate api key before login success 2026-05-09 16:38:42 -07:00
12 changed files with 349 additions and 76 deletions

View File

@@ -114,7 +114,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.131.0-alpha.5"
version = "0.0.0"
# Track the edition for all workspace crates in one place. Individual
# crates can still override this value, but keeping it here means new
# crates created with `cargo new -w ...` automatically inherit the 2024

View File

@@ -343,6 +343,7 @@ use codex_mcp::resolve_oauth_scopes;
use codex_memories_write::clear_memory_roots_contents;
use codex_model_provider::ProviderAccountError;
use codex_model_provider::create_model_provider;
use codex_model_provider::validate_api_key_with_models_endpoint;
use codex_models_manager::collaboration_mode_presets::builtin_collaboration_mode_presets;
use codex_protocol::ThreadId;
use codex_protocol::config_types::CollaborationMode;

View File

@@ -45,6 +45,10 @@ enum RefreshTokenRequestOutcome {
FailedPermanently,
}
/// Error surfaced when API key validation could not be completed for a
/// transient reason (network trouble, upstream outage, throttling); the
/// wording invites the user to simply retry.
fn api_key_validation_unavailable() -> JSONRPCErrorError {
    let message = "Could not validate API key right now. Check your connection and try again.";
    invalid_request(message)
}
impl Drop for ActiveLogin {
fn drop(&mut self) {
self.cancel();
@@ -271,6 +275,8 @@ impl AccountRequestProcessor {
}
}
self.validate_api_key(&params.api_key).await?;
match login_with_api_key(
&self.config.codex_home,
&params.api_key,
@@ -284,6 +290,32 @@ impl AccountRequestProcessor {
}
}
/// Probe the provider's models endpoint with `api_key` before the key is
/// accepted, mapping transport/HTTP failures onto JSON-RPC errors.
///
/// Classification:
/// - 401/403 → the key itself is rejected by the provider.
/// - 5xx, 408, 429, timeouts, stream/connection failures → transient; the
///   caller gets a retryable "try again" error.
/// - anything else → internal error carrying the underlying cause.
async fn validate_api_key(&self, api_key: &str) -> Result<(), JSONRPCErrorError> {
    // Providers that don't use OpenAI-style auth have nothing to validate.
    if !self.config.model_provider.requires_openai_auth {
        return Ok(());
    }
    let provider = self.config.model_provider.clone();
    match validate_api_key_with_models_endpoint(provider, api_key).await {
        Ok(()) => Ok(()),
        Err(CodexErr::UnexpectedStatus(err)) if matches!(err.status.as_u16(), 401 | 403) => {
            Err(invalid_request("API key is invalid or unusable."))
        }
        Err(CodexErr::UnexpectedStatus(err))
            if err.status.is_server_error() || matches!(err.status.as_u16(), 408 | 429) =>
        {
            Err(api_key_validation_unavailable())
        }
        Err(
            CodexErr::Timeout
            | CodexErr::Stream(..)
            | CodexErr::ResponseStreamFailed(_)
            | CodexErr::ConnectionFailed(_)
            | CodexErr::InternalServerError
            | CodexErr::RetryLimit(_),
        ) => Err(api_key_validation_unavailable()),
        Err(err) => Err(internal_error(format!("failed to validate api key: {err}"))),
    }
}
async fn login_api_key_v2(&self, request_id: ConnectionRequestId, params: LoginApiKeyParams) {
let result = self
.login_api_key_common(&params)

View File

@@ -15,6 +15,7 @@ use codex_app_server_protocol::RequestId;
use codex_config::types::AuthCredentialsStoreMode;
use codex_login::REFRESH_TOKEN_URL_OVERRIDE_ENV_VAR;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -76,6 +77,27 @@ shell_snapshot = false
)
}
/// Write a minimal test `config.toml` into `codex_home` whose OpenAI base URL
/// points at `openai_base_url` (the `/v1` path suffix is appended here).
fn create_config_toml_with_openai_base_url(
    codex_home: &Path,
    openai_base_url: &str,
) -> std::io::Result<()> {
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
openai_base_url = "{openai_base_url}/v1"

[features]
shell_snapshot = false
"#
    );
    std::fs::write(codex_home.join("config.toml"), contents)
}
fn create_config_toml_forced_login(codex_home: &Path, forced_method: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
let contents = format!(
@@ -105,6 +127,13 @@ async fn login_with_api_key_via_request(mcp: &mut McpProcess, api_key: &str) ->
Ok(())
}
/// Spin up a mock provider that accepts any API key: starts a `MockServer`,
/// points `config.toml` at it, and stubs `/v1/models` to return 200.
/// The returned server must be kept alive for the duration of the test.
async fn setup_valid_api_key_provider(codex_home: &Path) -> Result<MockServer> {
    let server = MockServer::start().await;
    let base_url = server.uri();
    create_config_toml_with_openai_base_url(codex_home, &base_url)?;
    mock_valid_api_key(&server).await;
    Ok(server)
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_no_auth() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -134,7 +163,7 @@ async fn get_auth_status_no_auth() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let _model_server = setup_valid_api_key_provider(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -195,7 +224,7 @@ async fn get_auth_status_with_api_key_when_auth_not_required() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let _model_server = setup_valid_api_key_provider(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -223,7 +252,7 @@ async fn get_auth_status_with_api_key_no_include_token() -> Result<()> {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_with_api_key_refresh_requested() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path())?;
let _model_server = setup_valid_api_key_provider(codex_home.path()).await?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -254,6 +283,17 @@ async fn get_auth_status_with_api_key_refresh_requested() -> Result<()> {
Ok(())
}
/// Stub the provider's `GET /v1/models` route with a 200 and an empty catalog
/// so API key validation succeeds; `.expect(1)` makes the mock server verify
/// that exactly one validation probe is made.
async fn mock_valid_api_key(server: &MockServer) {
    let ok_body = json!({ "models": [] });
    let stub = Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(200).set_body_json(ok_body))
        .expect(1);
    stub.mount(server).await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_auth_status_omits_token_after_permanent_refresh_failure() -> Result<()> {
let codex_home = TempDir::new()?;

View File

@@ -172,6 +172,17 @@ async fn mock_device_code_oauth_token(server: &MockServer, id_token: &str) {
.await;
}
/// Register a `GET /v1/models` stub answering HTTP 200 with an empty model
/// list, so any API key passed to the login flow validates successfully.
/// `.expect(1)` asserts the endpoint is hit exactly once.
async fn mock_valid_api_key(server: &MockServer) {
    let response = ResponseTemplate::new(200).set_body_json(json!({ "models": [] }));
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(response)
        .expect(1)
        .mount(server)
        .await;
}
#[tokio::test]
async fn logout_account_removes_auth_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -883,7 +894,23 @@ async fn external_auth_refresh_invalid_access_token_fails_turn() -> Result<()> {
#[tokio::test]
async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), CreateConfigTomlParams::default())?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
..Default::default()
},
)?;
Mock::given(method("GET"))
.and(path("/v1/models"))
.respond_with(ResponseTemplate::new(200).set_body_json(json!({
"models": []
})))
.expect(1)
.mount(&mock_server)
.await;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -928,6 +955,134 @@ async fn login_account_api_key_succeeds_and_notifies() -> Result<()> {
Ok(())
}
/// End-to-end check that API-key login succeeds when the mock `/v1/models`
/// endpoint responds with the real OpenAI list shape (`object`/`data`) rather
/// than the simplified `{"models": []}` body used elsewhere — validation only
/// depends on the HTTP status, not the response payload.
#[tokio::test]
async fn login_account_api_key_accepts_openai_models_response_shape() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mock_server = MockServer::start().await;
    // Require OpenAI auth and point the provider at the mock server so the
    // login path actually performs key validation against it.
    create_config_toml(
        codex_home.path(),
        CreateConfigTomlParams {
            requires_openai_auth: Some(true),
            base_url: Some(format!("{}/v1", mock_server.uri())),
            ..Default::default()
        },
    )?;
    // `.expect(1)`: the validation probe must hit the endpoint exactly once.
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(200).set_body_json(json!({
            "object": "list",
            "data": [
                {
                    "id": "gpt-4.1",
                    "object": "model",
                    "created": 1_713_775_400,
                    "owned_by": "openai"
                }
            ]
        })))
        .expect(1)
        .mount(&mock_server)
        .await;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let request_id = mcp
        .send_login_account_api_key_request("sk-test-key")
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let login: LoginAccountResponse = to_response(resp)?;
    assert_eq!(login, LoginAccountResponse::ApiKey {});
    // A successful login must persist the credentials.
    assert!(codex_home.path().join("auth.json").exists());
    Ok(())
}
/// A 401 from the validation probe must reject the key with the user-facing
/// "invalid or unusable" error and must NOT write `auth.json` — invalid keys
/// are never persisted.
#[tokio::test]
async fn login_account_api_key_rejects_unusable_key_before_persisting() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mock_server = MockServer::start().await;
    // Require OpenAI auth so login runs the key-validation step.
    create_config_toml(
        codex_home.path(),
        CreateConfigTomlParams {
            requires_openai_auth: Some(true),
            base_url: Some(format!("{}/v1", mock_server.uri())),
            ..Default::default()
        },
    )?;
    // `/v1/models` answers 401, simulating an invalid or revoked key.
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(401).set_body_json(json!({
            "error": { "message": "Invalid API key" }
        })))
        .expect(1)
        .mount(&mock_server)
        .await;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let request_id = mcp
        .send_login_account_api_key_request("sk-invalid-key")
        .await?;
    let err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
    )
    .await??;
    assert_eq!(err.error.message, "API key is invalid or unusable.");
    // Validation failed, so no credentials may have been written.
    assert!(!codex_home.path().join("auth.json").exists());
    Ok(())
}
/// A 500 from the validation probe is classified as transient: the user sees
/// a retryable "try again" message (not "invalid key") and nothing is
/// persisted, so a later retry can still succeed.
#[tokio::test]
async fn login_account_api_key_validation_transient_failure_is_retryable() -> Result<()> {
    let codex_home = TempDir::new()?;
    let mock_server = MockServer::start().await;
    // Require OpenAI auth so login runs the key-validation step.
    create_config_toml(
        codex_home.path(),
        CreateConfigTomlParams {
            requires_openai_auth: Some(true),
            base_url: Some(format!("{}/v1", mock_server.uri())),
            ..Default::default()
        },
    )?;
    // `/v1/models` answers 500, simulating a temporary provider outage.
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(500).set_body_json(json!({
            "error": { "message": "temporary outage" }
        })))
        .expect(1)
        .mount(&mock_server)
        .await;
    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
    let request_id = mcp
        .send_login_account_api_key_request("sk-test-key")
        .await?;
    let err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(request_id)),
    )
    .await??;
    assert_eq!(
        err.error.message,
        "Could not validate API key right now. Check your connection and try again."
    );
    // Transient failure: credentials must not have been persisted.
    assert!(!codex_home.path().join("auth.json").exists());
    Ok(())
}
#[tokio::test]
async fn login_account_api_key_rejected_when_forced_chatgpt() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -1488,13 +1643,16 @@ async fn get_account_no_auth() -> Result<()> {
#[tokio::test]
async fn get_account_with_api_key() -> Result<()> {
let codex_home = TempDir::new()?;
let model_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", model_server.uri())),
..Default::default()
},
)?;
mock_valid_api_key(&model_server).await;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

View File

@@ -3,6 +3,7 @@ use app_test_support::ChatGptAuthFixture;
use app_test_support::McpProcess;
use app_test_support::to_response;
use app_test_support::write_chatgpt_auth;
use app_test_support::write_mock_responses_config_toml;
use codex_app_server_protocol::AddCreditsNudgeCreditType;
use codex_app_server_protocol::AddCreditsNudgeEmailStatus;
use codex_app_server_protocol::GetAccountRateLimitsResponse;
@@ -19,6 +20,7 @@ use codex_config::types::AuthCredentialsStoreMode;
use codex_protocol::account::PlanType as AccountPlanType;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -61,6 +63,17 @@ async fn get_account_rate_limits_requires_auth() -> Result<()> {
#[tokio::test]
async fn get_account_rate_limits_requires_chatgpt_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let model_server = MockServer::start().await;
write_mock_responses_config_toml(
codex_home.path(),
&model_server.uri(),
&BTreeMap::new(),
/*auto_compact_limit*/ 1024,
/* requires_openai_auth */ Some(true),
"mock_provider",
"compact",
)?;
mock_valid_api_key(&model_server).await;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -266,6 +279,17 @@ async fn send_add_credits_nudge_email_requires_auth() -> Result<()> {
#[tokio::test]
async fn send_add_credits_nudge_email_requires_chatgpt_auth() -> Result<()> {
let codex_home = TempDir::new()?;
let model_server = MockServer::start().await;
write_mock_responses_config_toml(
codex_home.path(),
&model_server.uri(),
&BTreeMap::new(),
/*auto_compact_limit*/ 1024,
/* requires_openai_auth */ Some(true),
"mock_provider",
"compact",
)?;
mock_valid_api_key(&model_server).await;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -450,6 +474,17 @@ async fn login_with_api_key(mcp: &mut McpProcess, api_key: &str) -> Result<()> {
Ok(())
}
/// Install a stub for `GET /v1/models` returning 200 with an empty catalog,
/// so the API-key validation probe performed during login succeeds. The
/// `.expect(1)` expectation is verified when `server` is dropped.
async fn mock_valid_api_key(server: &MockServer) {
    Mock::given(method("GET"))
        .and(path("/v1/models"))
        .respond_with(ResponseTemplate::new(200).set_body_json(json!({ "models": [] })))
        .expect(1)
        .mount(server)
        .await;
}
fn write_chatgpt_base_url(codex_home: &Path, base_url: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
std::fs::write(config_toml, format!("chatgpt_base_url = \"{base_url}\"\n"))

View File

@@ -71,6 +71,26 @@ impl<T: HttpTransport> ModelsClient<T> {
Ok((models, header_etag))
}
/// Perform a lightweight GET against the models endpoint to check that the
/// current credentials are accepted, discarding the response body entirely.
///
/// Unlike `list_models`, this never decodes the payload, so validation works
/// even when the catalog response shape differs from what the client expects.
///
/// # Errors
/// Propagates any `ApiError` from the underlying request (e.g. auth
/// rejection, transport failure).
pub async fn validate_access(
    &self,
    client_version: &str,
    extra_headers: HeaderMap,
) -> Result<(), ApiError> {
    self.session
        .execute_with(
            Method::GET,
            Self::path(),
            extra_headers,
            /*body*/ None,
            |req| {
                // Same query-string versioning as the catalog fetch.
                Self::append_client_version_query(req, client_version);
            },
        )
        .await?;
    Ok(())
}
}
#[cfg(test)]

View File

@@ -7297,30 +7297,6 @@ async fn legacy_fast_service_tier_override_uses_priority_request_value() -> std:
Ok(())
}
#[tokio::test]
async fn config_toml_priority_service_tier_uses_priority_request_value() -> std::io::Result<()> {
let mut fixture = create_test_fixture()?;
fixture.cfg.service_tier = Some(ServiceTier::Fast.request_value().to_string());
let cwd = fixture.cwd_path();
let codex_home = fixture.codex_home();
let config = Config::load_from_base_config_with_overrides(
fixture.cfg,
ConfigOverrides {
cwd: Some(cwd),
..Default::default()
},
codex_home,
)
.await?;
assert_eq!(
config.service_tier,
Some(ServiceTier::Fast.request_value().to_string())
);
Ok(())
}
#[tokio::test]
async fn config_toml_service_tier_accepts_arbitrary_string() -> std::io::Result<()> {
let mut fixture = create_test_fixture()?;

View File

@@ -7,7 +7,6 @@ use codex_config::types::SessionPickerViewMode;
use codex_config::types::ToolSuggestDisabledTool;
use codex_features::FEATURES;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::config_types::TrustLevel;
use codex_protocol::openai_models::ReasoningEffort;
use std::collections::BTreeMap;
@@ -536,14 +535,9 @@ impl ConfigDocument {
}),
ConfigEdit::SetServiceTier { service_tier } => Ok(self.write_profile_value(
&["service_tier"],
service_tier.as_ref().map(|service_tier| {
let config_value = match ServiceTier::from_request_value(service_tier) {
Some(ServiceTier::Fast) => "fast",
Some(ServiceTier::Flex) => "flex",
None => service_tier.as_str(),
};
value(config_value)
}),
service_tier
.as_ref()
.map(|service_tier| value(service_tier.clone())),
)),
ConfigEdit::SetModelPersonality { personality } => Ok(self.write_profile_value(
&["personality"],

View File

@@ -3,7 +3,6 @@ use codex_config::types::AppToolApproval;
use codex_config::types::McpServerToolConfig;
use codex_config::types::McpServerTransportConfig;
use codex_config::types::SessionPickerViewMode;
use codex_protocol::config_types::ServiceTier;
use codex_protocol::openai_models::ReasoningEffort;
use pretty_assertions::assert_eq;
#[cfg(unix)]
@@ -33,34 +32,6 @@ model_reasoning_effort = "high"
assert_eq!(contents, expected);
}
#[test]
fn set_service_tier_saves_priority_as_fast() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
ConfigEditsBuilder::new(codex_home)
.set_service_tier(Some(ServiceTier::Fast.request_value().to_string()))
.apply_blocking()
.expect("persist");
let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, "service_tier = \"fast\"\n");
}
#[test]
fn set_service_tier_preserves_unknown_service_tier() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
ConfigEditsBuilder::new(codex_home)
.set_service_tier(Some("experimental-tier-id".to_string()))
.apply_blocking()
.expect("persist");
let contents = std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
assert_eq!(contents, "service_tier = \"experimental-tier-id\"\n");
}
#[test]
fn builder_with_edits_applies_custom_paths() {
let tmp = tempdir().expect("tmpdir");

View File

@@ -9,6 +9,7 @@ pub use auth::unauthenticated_auth_provider;
pub use bearer_auth_provider::BearerAuthProvider;
pub use bearer_auth_provider::BearerAuthProvider as CoreAuthProvider;
pub use codex_protocol::account::ProviderAccount;
pub use models_endpoint::validate_api_key_with_models_endpoint;
pub use provider::ModelProvider;
pub use provider::ProviderAccountError;
pub use provider::ProviderAccountResult;

View File

@@ -16,6 +16,7 @@ use codex_login::CodexAuth;
use codex_login::collect_auth_env_telemetry;
use codex_login::default_client::build_reqwest_client;
use codex_model_provider_info::ModelProviderInfo;
use codex_models_manager::client_version_to_whole;
use codex_models_manager::manager::ModelsEndpointClient;
use codex_otel::TelemetryAuthMode;
use codex_protocol::error::CodexErr;
@@ -36,6 +37,7 @@ const MODELS_ENDPOINT: &str = "/models";
pub(crate) struct OpenAiModelsEndpoint {
provider_info: ModelProviderInfo,
auth_manager: Option<Arc<AuthManager>>,
auth_override: Option<CodexAuth>,
}
impl OpenAiModelsEndpoint {
@@ -46,10 +48,23 @@ impl OpenAiModelsEndpoint {
Self {
provider_info,
auth_manager,
auth_override: None,
}
}
/// Build an endpoint that authenticates with a fixed `CodexAuth` instead of
/// consulting an `AuthManager` — used to probe a candidate API key before it
/// is persisted (see `validate_api_key_with_models_endpoint`).
fn with_auth(provider_info: ModelProviderInfo, auth: CodexAuth) -> Self {
    Self {
        provider_info,
        auth_manager: None,
        auth_override: Some(auth),
    }
}
async fn auth(&self) -> Option<CodexAuth> {
if let Some(auth) = self.auth_override.as_ref() {
return Some(auth.clone());
}
match self.auth_manager.as_ref() {
Some(auth_manager) => auth_manager.auth().await,
None => None,
@@ -65,6 +80,15 @@ impl OpenAiModelsEndpoint {
}
}
/// Check that `api_key` is accepted by the provider's models endpoint,
/// without touching any stored credentials and without decoding the model
/// catalog. Intended for pre-login validation of a candidate key.
pub async fn validate_api_key_with_models_endpoint(
    provider_info: ModelProviderInfo,
    api_key: &str,
) -> CoreResult<()> {
    let auth = CodexAuth::from_api_key(api_key);
    let endpoint = OpenAiModelsEndpoint::with_auth(provider_info, auth);
    endpoint.validate_access(&client_version_to_whole()).await
}
#[async_trait]
impl ModelsEndpointClient for OpenAiModelsEndpoint {
fn has_command_auth(&self) -> bool {
@@ -84,6 +108,34 @@ impl ModelsEndpointClient for OpenAiModelsEndpoint {
) -> CoreResult<(Vec<ModelInfo>, Option<String>)> {
let _timer =
codex_otel::start_global_timer("codex.remote_models.fetch_update.duration_ms", &[]);
let client = self.models_client().await?;
timeout(
MODELS_REFRESH_TIMEOUT,
client.list_models(client_version, HeaderMap::new()),
)
.await
.map_err(|_| CodexErr::Timeout)?
.map_err(map_api_error)
}
}
impl OpenAiModelsEndpoint {
/// Probe the models endpoint with the configured auth to confirm access,
/// enforcing the same timeout used for catalog refreshes.
async fn validate_access(&self, client_version: &str) -> CoreResult<()> {
    // NOTE(review): this reuses the fetch_update timer metric for the
    // validation probe — confirm that is intentional before renaming.
    let _timer =
        codex_otel::start_global_timer("codex.remote_models.fetch_update.duration_ms", &[]);
    let client = self.models_client().await?;
    let probe = client.validate_access(client_version, HeaderMap::new());
    match timeout(MODELS_REFRESH_TIMEOUT, probe).await {
        Err(_elapsed) => Err(CodexErr::Timeout),
        Ok(outcome) => outcome.map_err(map_api_error),
    }
}
async fn models_client(&self) -> CoreResult<ModelsClient<ReqwestTransport>> {
let auth = self.auth().await;
let auth_mode = auth.as_ref().map(CodexAuth::auth_mode);
let api_provider = self.provider_info.to_api_provider(auth_mode)?;
@@ -96,16 +148,9 @@ impl ModelsEndpointClient for OpenAiModelsEndpoint {
auth_header_name: auth_telemetry.name,
auth_env: self.auth_env(),
});
let client = ModelsClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry));
timeout(
MODELS_REFRESH_TIMEOUT,
client.list_models(client_version, HeaderMap::new()),
)
.await
.map_err(|_| CodexErr::Timeout)?
.map_err(map_api_error)
Ok(ModelsClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry)))
}
}