Revert "Option to Notify Workspace Owner When Usage Limit is Reached" (#17391)

Reverts openai/codex#16969

#sev3-2026-04-10-accountscheckversion-500s-for-openai-workspace-7300
This commit is contained in:
Shijie Rao
2026-04-10 16:33:13 -07:00
committed by GitHub
parent a3be74143a
commit 930e5adb7e
82 changed files with 60 additions and 3233 deletions

View File

@@ -28,7 +28,6 @@ use codex_app_server_protocol::ServerNotification;
use codex_app_server_protocol::ServerRequest;
use codex_app_server_protocol::TurnCompletedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::WorkspaceRole;
use codex_config::types::AuthCredentialsStoreMode;
use codex_login::login_with_api_key;
use codex_protocol::account::PlanType as AccountPlanType;
@@ -56,7 +55,6 @@ struct CreateConfigTomlParams {
forced_workspace_id: Option<String>,
requires_openai_auth: Option<bool>,
base_url: Option<String>,
chatgpt_base_url: Option<String>,
}
fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std::io::Result<()> {
@@ -64,9 +62,6 @@ fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std:
let base_url = params
.base_url
.unwrap_or_else(|| "http://127.0.0.1:0/v1".to_string());
let chatgpt_base_url = params
.chatgpt_base_url
.unwrap_or_else(|| "http://127.0.0.1:0/backend-api".to_string());
let forced_line = if let Some(method) = params.forced_method {
format!("forced_login_method = \"{method}\"\n")
} else {
@@ -87,7 +82,6 @@ fn create_config_toml(codex_home: &Path, params: CreateConfigTomlParams) -> std:
model = "mock-model"
approval_policy = "never"
sandbox_mode = "danger-full-access"
chatgpt_base_url = "{chatgpt_base_url}"
{forced_line}
{forced_workspace_line}
@@ -128,49 +122,6 @@ async fn mock_device_code_usercode_failure(server: &MockServer, status: u16) {
.await;
}
/// Stubs the backend accounts-check endpoint so it reports `role` for
/// `account_id` with an HTTP 200 response.
async fn mock_accounts_check_role(server: &MockServer, account_id: &str, role: &str) {
    // Minimal payload: one account entry keyed by id, plus the ordering
    // list the client reads alongside it.
    let body = json!({
        "accounts": {
            account_id: {
                "account": {
                    "account_user_role": role,
                }
            }
        },
        "account_ordering": [account_id],
    });
    let response = ResponseTemplate::new(200).set_body_json(body);
    Mock::given(method("GET"))
        .and(path("/backend-api/accounts/check/v4-2023-04-27"))
        .respond_with(response)
        .mount(server)
        .await;
}
/// Same stub as `mock_accounts_check_role`, but the response is held back
/// by `delay` so tests can prove callers do not block on this endpoint.
async fn mock_slow_accounts_check_role(
    server: &MockServer,
    account_id: &str,
    role: &str,
    delay: Duration,
) {
    let body = json!({
        "accounts": {
            account_id: {
                "account": {
                    "account_user_role": role,
                }
            }
        },
        "account_ordering": [account_id],
    });
    // Attach the artificial latency before the JSON body.
    let response = ResponseTemplate::new(200)
        .set_delay(delay)
        .set_body_json(body);
    Mock::given(method("GET"))
        .and(path("/backend-api/accounts/check/v4-2023-04-27"))
        .respond_with(response)
        .mount(server)
        .await;
}
async fn mock_device_code_token_success(server: &MockServer) {
Mock::given(method("POST"))
.and(path("/api/accounts/deviceauth/token"))
@@ -270,12 +221,10 @@ async fn set_auth_token_updates_account_and_notifies() -> Result<()> {
CreateConfigTomlParams {
requires_openai_auth: Some(true),
base_url: Some(format!("{}/v1", mock_server.uri())),
chatgpt_base_url: Some(format!("{}/backend-api", mock_server.uri())),
..Default::default()
},
)?;
write_models_cache(codex_home.path())?;
mock_accounts_check_role(&mock_server, "org-embedded", "standard-user").await;
let access_token = encode_id_token(
&ChatGptIdTokenClaims::new()
@@ -313,20 +262,6 @@ async fn set_auth_token_updates_account_and_notifies() -> Result<()> {
};
assert_eq!(payload.auth_mode, Some(AuthMode::ChatgptAuthTokens));
assert_eq!(payload.plan_type, Some(AccountPlanType::Pro));
assert_eq!(payload.workspace_role, None);
assert_eq!(payload.is_workspace_owner, None);
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert_eq!(payload.workspace_role, Some(WorkspaceRole::StandardUser));
assert_eq!(payload.is_workspace_owner, Some(false));
let get_id = mcp
.send_get_account_request(GetAccountParams {
@@ -346,8 +281,6 @@ async fn set_auth_token_updates_account_and_notifies() -> Result<()> {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
}
);
@@ -415,8 +348,6 @@ async fn account_read_refresh_token_is_noop_in_external_mode() -> Result<()> {
email: "embedded@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
}
);
@@ -1574,8 +1505,6 @@ async fn get_account_with_api_key() -> Result<()> {
let expected = GetAccountResponse {
account: Some(Account::ApiKey {}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
};
assert_eq!(received, expected);
@@ -1610,8 +1539,6 @@ async fn get_account_when_auth_not_required() -> Result<()> {
let expected = GetAccountResponse {
account: None,
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: false,
};
assert_eq!(received, expected);
@@ -1632,8 +1559,7 @@ async fn get_account_with_chatgpt() -> Result<()> {
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.email("user@example.com")
.plan_type("pro")
.is_org_owner(/*is_org_owner*/ true),
.plan_type("pro"),
AuthCredentialsStoreMode::File,
)?;
@@ -1657,190 +1583,12 @@ async fn get_account_with_chatgpt() -> Result<()> {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: Some(true),
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
// The initial `get_account` response carries no workspace role; the role
// fetched from the mocked accounts-check endpoint arrives afterwards as an
// `account/updated` notification.
async fn get_account_with_chatgpt_emits_workspace_role_from_accounts_check() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
// Point chatgpt_base_url at the mock server so the accounts-check call
// hits the stub below.
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
chatgpt_base_url: Some(format!("{}/backend-api", mock_server.uri())),
..Default::default()
},
)?;
// Stub reports "account-owner" for the same account id the auth fixture
// below uses, so the role should be attributed to the current account.
mock_accounts_check_role(&mock_server, "org-embedded", "account-owner").await;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.account_id("org-embedded")
.email("user@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded"),
AuthCredentialsStoreMode::File,
)?;
// Clear OPENAI_API_KEY so the process resolves auth from the fixture only.
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
// The synchronous response has no role yet — it is resolved asynchronously.
let expected = GetAccountResponse {
account: Some(Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
};
assert_eq!(received, expected);
// Once the accounts-check completes, the role is delivered as a
// server-initiated `account/updated` notification.
let note = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/updated"),
)
.await??;
let parsed: ServerNotification = note.try_into()?;
let ServerNotification::AccountUpdated(payload) = parsed else {
bail!("unexpected notification: {parsed:?}");
};
assert_eq!(payload.workspace_role, Some(WorkspaceRole::AccountOwner));
assert_eq!(payload.is_workspace_owner, Some(true));
Ok(())
}
#[tokio::test]
// A role reported for a *different* account id in the accounts-check payload
// must not be attributed to the currently-authenticated account: the response
// keeps workspace_role/is_workspace_owner as None.
async fn get_account_with_chatgpt_does_not_guess_workspace_role_from_other_accounts() -> Result<()>
{
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
chatgpt_base_url: Some(format!("{}/backend-api", mock_server.uri())),
..Default::default()
},
)?;
// Stub describes "org-other", while the auth fixture below is for
// "org-current" — the ids deliberately do not match.
mock_accounts_check_role(&mock_server, "org-other", "account-owner").await;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.account_id("org-current")
.email("user@example.com")
.plan_type("pro")
.chatgpt_account_id("org-current"),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let params = GetAccountParams {
refresh_token: false,
};
let request_id = mcp.send_get_account_request(params).await?;
let resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
// No role is inferred from the mismatched account entry.
let expected = GetAccountResponse {
account: Some(Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
};
assert_eq!(received, expected);
Ok(())
}
#[tokio::test]
// `get_account` must respond promptly even when the accounts-check endpoint
// is slow: the stub delays its reply by 2s while the test requires the
// response within 500ms.
async fn get_account_with_chatgpt_does_not_wait_for_accounts_check() -> Result<()> {
let codex_home = TempDir::new()?;
let mock_server = MockServer::start().await;
create_config_toml(
codex_home.path(),
CreateConfigTomlParams {
requires_openai_auth: Some(true),
chatgpt_base_url: Some(format!("{}/backend-api", mock_server.uri())),
..Default::default()
},
)?;
// 2s delay — longer than the 500ms read timeout used below, so a blocking
// implementation would fail this test.
mock_slow_accounts_check_role(
&mock_server,
"org-embedded",
"standard-user",
Duration::from_secs(2),
)
.await;
write_chatgpt_auth(
codex_home.path(),
ChatGptAuthFixture::new("access-chatgpt")
.account_id("org-embedded")
.email("user@example.com")
.plan_type("pro")
.chatgpt_account_id("org-embedded")
.is_org_owner(/*is_org_owner*/ true),
AuthCredentialsStoreMode::File,
)?;
let mut mcp = McpProcess::new_with_env(codex_home.path(), &[("OPENAI_API_KEY", None)]).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_get_account_request(GetAccountParams {
refresh_token: false,
})
.await?;
// Deliberately short timeout: the response must not wait on accounts-check.
let resp: JSONRPCResponse = timeout(
Duration::from_millis(500),
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let received: GetAccountResponse = to_response(resp)?;
// is_workspace_owner comes from the locally-written auth fixture
// (`is_org_owner(true)`), not from the still-pending accounts-check call.
assert_eq!(
received,
GetAccountResponse {
account: Some(Account::Chatgpt {
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Pro,
}),
workspace_role: None,
is_workspace_owner: Some(true),
requires_openai_auth: true,
}
);
Ok(())
}
#[tokio::test]
async fn get_account_with_chatgpt_missing_plan_claim_returns_unknown() -> Result<()> {
let codex_home = TempDir::new()?;
@@ -1877,8 +1625,6 @@ async fn get_account_with_chatgpt_missing_plan_claim_returns_unknown() -> Result
email: "user@example.com".to_string(),
plan_type: AccountPlanType::Unknown,
}),
workspace_role: None,
is_workspace_owner: None,
requires_openai_auth: true,
};
assert_eq!(received, expected);

View File

@@ -32,7 +32,6 @@ mod request_user_input;
mod review;
mod safety_check_downgrade;
mod skills_list;
mod thread_add_credits_nudge_email;
mod thread_archive;
mod thread_fork;
mod thread_list;

View File

@@ -133,10 +133,7 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
}
}
}
],
"spend_control": {
"reached": true
}
]
});
Mock::given(method("GET"))
@@ -175,7 +172,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
spend_control: Some(codex_app_server_protocol::SpendControlSnapshot { reached: true }),
plan_type: Some(AccountPlanType::Pro),
},
rate_limits_by_limit_id: Some(
@@ -196,9 +192,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
resets_at: Some(secondary_reset_timestamp),
}),
credits: None,
spend_control: Some(codex_app_server_protocol::SpendControlSnapshot {
reached: true,
}),
plan_type: Some(AccountPlanType::Pro),
},
),
@@ -214,7 +207,6 @@ async fn get_account_rate_limits_returns_snapshot() -> Result<()> {
}),
secondary: None,
credits: None,
spend_control: None,
plan_type: Some(AccountPlanType::Pro),
},
),

View File

@@ -1,96 +0,0 @@
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::create_mock_responses_server_sequence;
use app_test_support::to_response;
use codex_app_server_protocol::AddCreditsNudgeEmailNotification;
use codex_app_server_protocol::AddCreditsNudgeEmailResult;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::ThreadAddCreditsNudgeEmailParams;
use codex_app_server_protocol::ThreadAddCreditsNudgeEmailResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use pretty_assertions::assert_eq;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
// Upper bound on how long any single read from the app-server stream may take
// before a test is considered hung.
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
#[tokio::test]
// Sends `thread/addCreditsNudgeEmail` for a freshly started thread and checks
// the op completes with an `account/addCreditsNudgeEmail/completed`
// notification. With no ChatGPT auth configured, the result is `Failed`.
async fn thread_add_credits_nudge_email_submits_core_op_and_emits_completion() -> Result<()> {
let tmp = TempDir::new()?;
let codex_home = tmp.path().join("codex_home");
std::fs::create_dir(&codex_home)?;
// No scripted model responses are needed — the op never reaches the model.
let server = create_mock_responses_server_sequence(vec![]).await;
create_config_toml(codex_home.as_path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
// Start a thread so we have a valid thread_id to target.
let start_id = mcp
.send_thread_start_request(ThreadStartParams::default())
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let nudge_id = mcp
.send_thread_add_credits_nudge_email_request(ThreadAddCreditsNudgeEmailParams {
thread_id: thread.id.clone(),
})
.await?;
// The RPC response only acknowledges submission; the outcome arrives as a
// notification below.
let nudge_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(nudge_id)),
)
.await??;
let _: ThreadAddCreditsNudgeEmailResponse =
to_response::<ThreadAddCreditsNudgeEmailResponse>(nudge_resp)?;
let notification: AddCreditsNudgeEmailNotification = serde_json::from_value(
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("account/addCreditsNudgeEmail/completed"),
)
.await??
.params
.expect("account/addCreditsNudgeEmail/completed params"),
)?;
assert_eq!(notification.thread_id, thread.id);
// This environment has no ChatGPT credentials, so the op reports failure
// with the authentication-required message.
assert_eq!(
notification.result,
AddCreditsNudgeEmailResult::Failed {
message: "codex account authentication required to notify workspace owner".to_string(),
}
);
Ok(())
}
/// Writes a minimal `config.toml` into `codex_home` that routes the mock
/// provider at `server_uri` with retries disabled.
fn create_config_toml(codex_home: &Path, server_uri: &str) -> std::io::Result<()> {
    let config_path = codex_home.join("config.toml");
    // Raw-string body is kept verbatim; only the server URI is interpolated.
    let contents = format!(
        r#"
model = "mock-model"
approval_policy = "never"
sandbox_mode = "read-only"
model_provider = "mock_provider"
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"
wire_api = "responses"
request_max_retries = 0
stream_max_retries = 0
"#
    );
    std::fs::write(config_path, contents)
}