Add openai_base_url config override for built-in provider (#12031)

We regularly get bug reports from users who mistakenly have the
`OPENAI_BASE_URL` environment variable set. This PR deprecates that
environment variable in favor of a top-level config key,
`openai_base_url`, that serves the same purpose. As a config key, the
setting is more visible to users, and it participates in all of the
infrastructure we've added for layered and managed configs.

Summary
- introduce the `openai_base_url` top-level config key, update the
schema and tests, and route the built-in openai provider through it
- fall back to the deprecated `OPENAI_BASE_URL` env var, with a
deprecation warning, when no `openai_base_url` config key is present
- update CLI, SDK, and TUI code to prefer the new config path (with a
deprecated env-var fallback) and document the SDK behavior change
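
For reference, a minimal sketch of the two ways to set the override.
The key name and the `-c` override mechanism come from this PR; the
config file location is an assumption about the default Codex home:

# in config.toml (location assumed, e.g. $CODEX_HOME/config.toml)
openai_base_url = "http://127.0.0.1:8080/v1"

# or as a one-off CLI override
codex exec -c 'openai_base_url="http://127.0.0.1:8080/v1"' "hello?"
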
Eric Traut, 2026-03-13 20:12:25 -06:00 (committed by GitHub)
parent b859a98e0f
commit 4b9d5c8c1b
21 changed files with 233 additions and 70 deletions

View File

@@ -226,7 +226,7 @@ impl TestCodexBuilder {
) -> anyhow::Result<(Config, Arc<TempDir>)> {
let model_provider = ModelProviderInfo {
base_url: Some(base_url),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let cwd = Arc::new(TempDir::new()?);
let mut config = load_default_config_for_test(home).await;
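
From the call sites in this diff, `built_in_model_providers` now takes
the `openai_base_url` override and threads it into the built-in
"openai" entry. A minimal sketch of the plausible shape (the parameter
type, the cut-down struct, and the body are assumptions inferred from
the `/* openai_base_url */ None` call sites, not the PR's actual code):

use std::collections::HashMap;

// Sketch only: the real ModelProviderInfo has more fields.
#[derive(Clone)]
pub struct ModelProviderInfo {
    pub name: String,
    pub base_url: Option<String>,
}

// Assumption: the new parameter carries the `openai_base_url` config
// value and, when present, overrides the default for the built-in
// "openai" provider only.
pub fn built_in_model_providers(
    openai_base_url: Option<String>,
) -> HashMap<String, ModelProviderInfo> {
    let mut providers = HashMap::new();
    providers.insert(
        "openai".to_string(),
        ModelProviderInfo {
            name: "OpenAI".to_string(),
            // Config override wins; None keeps the provider default.
            base_url: openai_base_url,
        },
    );
    providers
}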

View File

@@ -23,7 +23,8 @@ impl TestCodexExecBuilder {
pub fn cmd_with_server(&self, server: &MockServer) -> assert_cmd::Command {
let mut cmd = self.cmd();
let base = format!("{}/v1", server.uri());
cmd.env("OPENAI_BASE_URL", base);
cmd.arg("-c")
.arg(format!("openai_base_url={}", toml_string_literal(&base)));
cmd
}
@@ -35,6 +36,10 @@ impl TestCodexExecBuilder {
}
}
+fn toml_string_literal(value: &str) -> String {
+serde_json::to_string(value).expect("serialize TOML string literal")
+}
pub fn test_codex_exec() -> TestCodexExecBuilder {
TestCodexExecBuilder {
home: TempDir::new().expect("create temp home"),
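
The helper leans on `serde_json` for escaping: for typical URL inputs,
the escapes JSON emits (`\"`, `\\`, `\n`, `\t`, `\uXXXX`) are also
valid TOML basic-string escapes, so the result can be spliced directly
into a `-c openai_base_url=...` override. This rationale is my
inference, and the assertions below are illustrative, not tests from
this PR:

assert_eq!(toml_string_literal("http://127.0.0.1:8080/v1"),
           r#""http://127.0.0.1:8080/v1""#);
// Embedded quotes come back escaped, still valid TOML.
assert_eq!(toml_string_literal(r#"quote"here"#), r#""quote\"here""#);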

View File

@@ -52,8 +52,7 @@ async fn responses_mode_stream_cli() {
.arg(&repo_root)
.arg("hello?");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
.env("OPENAI_API_KEY", "dummy");
let output = cmd.output().unwrap();
println!("Status: {}", output.status);
@@ -89,6 +88,75 @@ async fn responses_mode_stream_cli() {
// assert!(page.items[0].created_at.is_some(), "missing created_at");
}
+/// Ensures `OPENAI_BASE_URL` still works as a deprecated fallback.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn responses_mode_stream_cli_supports_openai_base_url_env_fallback() {
+skip_if_no_network!();
+let server = MockServer::start().await;
+let repo_root = repo_root();
+let sse = responses::sse(vec![
+responses::ev_response_created("resp-1"),
+responses::ev_assistant_message("msg-1", "hi"),
+responses::ev_completed("resp-1"),
+]);
+let resp_mock = responses::mount_sse_once(&server, sse).await;
+let home = TempDir::new().unwrap();
+let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap();
+let mut cmd = AssertCommand::new(bin);
+cmd.timeout(Duration::from_secs(30));
+cmd.arg("exec")
+.arg("--skip-git-repo-check")
+.arg("-C")
+.arg(&repo_root)
+.arg("hello?");
+cmd.env("CODEX_HOME", home.path())
+.env("OPENAI_API_KEY", "dummy")
+.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
+let output = cmd.output().unwrap();
+assert!(output.status.success());
+let request = resp_mock.single_request();
+assert_eq!(request.path(), "/v1/responses");
+}
+/// Ensures `openai_base_url` config override routes built-in openai provider requests.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn responses_mode_stream_cli_supports_openai_base_url_config_override() {
+skip_if_no_network!();
+let server = MockServer::start().await;
+let repo_root = repo_root();
+let sse = responses::sse(vec![
+responses::ev_response_created("resp-1"),
+responses::ev_assistant_message("msg-1", "hi"),
+responses::ev_completed("resp-1"),
+]);
+let resp_mock = responses::mount_sse_once(&server, sse).await;
+let home = TempDir::new().unwrap();
+let bin = codex_utils_cargo_bin::cargo_bin("codex").unwrap();
+let mut cmd = AssertCommand::new(bin);
+cmd.timeout(Duration::from_secs(30));
+cmd.arg("exec")
+.arg("--skip-git-repo-check")
+.arg("-c")
+.arg(format!("openai_base_url=\"{}/v1\"", server.uri()))
+.arg("-C")
+.arg(&repo_root)
+.arg("hello?");
+cmd.env("CODEX_HOME", home.path())
+.env("OPENAI_API_KEY", "dummy");
+let output = cmd.output().unwrap();
+assert!(output.status.success());
+let request = resp_mock.single_request();
+assert_eq!(request.path(), "/v1/responses");
+}
/// Verify that passing `-c model_instructions_file=...` to the CLI
/// overrides the built-in base instructions by inspecting the request body
/// received by a mock OpenAI Responses endpoint.
@@ -136,8 +204,7 @@ async fn exec_cli_applies_model_instructions_file() {
.arg(&repo_root)
.arg("hello?\n");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("OPENAI_BASE_URL", format!("{}/v1", server.uri()));
.env("OPENAI_API_KEY", "dummy");
let output = cmd.output().unwrap();
println!("Status: {}", output.status);
@@ -247,13 +314,14 @@ async fn responses_api_stream_cli() {
let mut cmd = AssertCommand::new(bin);
cmd.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg("hello?");
cmd.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", fixture)
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", fixture);
let output = cmd.output().unwrap();
assert!(output.status.success());
@@ -283,14 +351,14 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
let mut cmd = AssertCommand::new(bin);
cmd.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg(&prompt);
cmd.env("CODEX_HOME", home.path())
.env(CODEX_API_KEY_ENV_VAR, "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
// Required for CLI arg parsing even though fixture short-circuits network usage.
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", &fixture);
let output = cmd.output().unwrap();
assert!(
@@ -404,6 +472,8 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
let mut cmd2 = AssertCommand::new(bin2);
cmd2.arg("exec")
.arg("--skip-git-repo-check")
.arg("-c")
.arg("openai_base_url=\"http://unused.local\"")
.arg("-C")
.arg(&repo_root)
.arg(&prompt2)
@@ -411,8 +481,7 @@ async fn integration_creates_and_checks_session_file() -> anyhow::Result<()> {
.arg("--last");
cmd2.env("CODEX_HOME", home.path())
.env("OPENAI_API_KEY", "dummy")
.env("CODEX_RS_SSE_FIXTURE", &fixture)
.env("OPENAI_BASE_URL", "http://unused.local");
.env("CODEX_RS_SSE_FIXTURE", &fixture);
let output2 = cmd2.output().unwrap();
assert!(output2.status.success(), "resume codex-cli run failed");
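
Taken together, these tests pin down the resolution order: an explicit
`openai_base_url` config value wins, and the `OPENAI_BASE_URL` env var
remains a deprecated fallback. A minimal sketch of that precedence
(the function name and warning text are illustrative assumptions, not
the PR's actual code):

// Illustrative only; the real lookup lives in the config layer.
fn resolve_openai_base_url(config_value: Option<String>) -> Option<String> {
    if config_value.is_some() {
        return config_value; // config key takes precedence
    }
    match std::env::var("OPENAI_BASE_URL") {
        Ok(url) if !url.is_empty() => {
            eprintln!("warning: OPENAI_BASE_URL is deprecated; set the `openai_base_url` config key instead");
            Some(url) // deprecated fallback, with a warning
        }
        _ => None, // fall through to the provider default
    }
}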

View File

@@ -715,7 +715,7 @@ async fn chatgpt_auth_sends_correct_request() {
)
.await;
-let mut model_provider = built_in_model_providers()["openai"].clone();
+let mut model_provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
model_provider.base_url = Some(format!("{}/api/codex", server.uri()));
let mut builder = test_codex()
.with_auth(create_dummy_codex_auth())
@@ -791,7 +791,7 @@ async fn prefers_apikey_when_config_prefers_apikey_even_with_chatgpt_tokens() {
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
// Init session
@@ -1977,7 +1977,7 @@ async fn token_count_includes_rate_limits_snapshot() {
.mount(&server)
.await;
-let mut provider = built_in_model_providers()["openai"].clone();
+let mut provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
provider.base_url = Some(format!("{}/v1", server.uri()));
let mut builder = test_codex()

View File

@@ -93,7 +93,7 @@ fn json_fragment(text: &str) -> String {
}
fn non_openai_model_provider(server: &MockServer) -> ModelProviderInfo {
-let mut provider = built_in_model_providers()["openai"].clone();
+let mut provider = built_in_model_providers(/* openai_base_url */ None)["openai"].clone();
provider.name = "OpenAI (test)".into();
provider.base_url = Some(format!("{}/v1", server.uri()));
provider

View File

@@ -95,7 +95,7 @@ async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<(
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -654,7 +654,7 @@ async fn remote_models_do_not_append_removed_builtin_presets() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -709,7 +709,7 @@ async fn remote_models_merge_adds_new_high_priority_first() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -756,7 +756,7 @@ async fn remote_models_merge_replaces_overlapping_model() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -800,7 +800,7 @@ async fn remote_models_merge_preserves_bundled_models_on_empty_response() -> Res
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -841,7 +841,7 @@ async fn remote_models_request_times_out_after_5s() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),
@@ -907,7 +907,7 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
let provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
-..built_in_model_providers()["openai"].clone()
+..built_in_model_providers(/* openai_base_url */ None)["openai"].clone()
};
let manager = codex_core::test_support::models_manager_with_provider(
codex_home.path().to_path_buf(),