Compare commits

...

4 Commits

Author         SHA1        Message  Date
Ahmed Ibrahim  cea73e3566  fix      2026-02-12 11:18:34 -08:00
Ahmed Ibrahim  c7868ff39e  fix      2026-02-12 11:17:52 -08:00
Ahmed Ibrahim  0ceee61ad3  fix      2026-02-12 11:13:12 -08:00
Ahmed Ibrahim  f730efcea4  prefix   2026-02-12 11:08:56 -08:00
2 changed files with 5 additions and 174 deletions


@@ -138,41 +138,18 @@ impl ModelsManager {
     /// Look up model metadata, applying remote overrides and config adjustments.
     pub async fn get_model_info(&self, model: &str, config: &Config) -> ModelInfo {
         let remote = self
-            .find_remote_model_by_longest_prefix(model, config)
-            .await;
+            .get_remote_models(config)
+            .await
+            .into_iter()
+            .find(|m| m.slug == model);
         let model = if let Some(remote) = remote {
-            ModelInfo {
-                slug: model.to_string(),
-                ..remote
-            }
+            remote
         } else {
             model_info::model_info_from_slug(model)
         };
         model_info::with_config_overrides(model, config)
     }
 
-    async fn find_remote_model_by_longest_prefix(
-        &self,
-        model: &str,
-        config: &Config,
-    ) -> Option<ModelInfo> {
-        let mut best: Option<ModelInfo> = None;
-        for candidate in self.get_remote_models(config).await {
-            if !model.starts_with(&candidate.slug) {
-                continue;
-            }
-            let is_better_match = if let Some(current) = best.as_ref() {
-                candidate.slug.len() > current.slug.len()
-            } else {
-                true
-            };
-            if is_better_match {
-                best = Some(candidate);
-            }
-        }
-        best
-    }
-
     /// Refresh models if the provided ETag differs from the cached ETag.
     ///
     /// Uses `Online` strategy to fetch latest models when ETags differ.
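
The behavioral change in get_model_info is easiest to see side by side. Below is a minimal, self-contained sketch (the Model type is a hypothetical stand-in for ModelInfo, not the crate's actual API) contrasting the removed longest-prefix lookup with the exact-slug lookup that replaces it: a request for "gpt-5.3-codex-test" used to resolve to the "gpt-5.3-codex" remote entry, and now resolves to a remote entry only on a verbatim slug match, falling back to model_info::model_info_from_slug otherwise.

// Stand-in for ModelInfo; only the slug matters for the lookup.
#[derive(Clone, Debug, PartialEq)]
struct Model {
    slug: String,
}

// Removed behavior: pick the remote model whose slug is the longest
// prefix of the requested name.
fn find_by_longest_prefix(requested: &str, remote: &[Model]) -> Option<Model> {
    remote
        .iter()
        .filter(|m| requested.starts_with(&m.slug))
        .max_by_key(|m| m.slug.len())
        .cloned()
}

// New behavior: only an exact slug match resolves to a remote model.
fn find_exact(requested: &str, remote: &[Model]) -> Option<Model> {
    remote.iter().find(|m| m.slug == requested).cloned()
}

fn main() {
    let remote = vec![
        Model { slug: "gpt-5.3".into() },
        Model { slug: "gpt-5.3-codex".into() },
    ];
    let old = find_by_longest_prefix("gpt-5.3-codex-test", &remote);
    let new = find_exact("gpt-5.3-codex-test", &remote);
    assert_eq!(old.map(|m| m.slug), Some("gpt-5.3-codex".to_string()));
    // With exact matching the lookup misses, so the caller falls back
    // to built-in metadata via model_info_from_slug.
    assert_eq!(new, None);
}

Under the new lookup, names like "gpt-5.3-codex-test" no longer inherit a prefix model's metadata (for example its default reasoning effort), which is why the two tests in the next file are deleted rather than updated.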


@@ -55,152 +55,6 @@ use wiremock::MockServer;
 
 const REMOTE_MODEL_SLUG: &str = "codex-test";
 
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn remote_models_get_model_info_uses_longest_matching_prefix() -> Result<()> {
-    skip_if_no_network!(Ok(()));
-    skip_if_sandbox!(Ok(()));
-
-    let server = MockServer::start().await;
-    let generic = test_remote_model_with_policy(
-        "gpt-5.3",
-        ModelVisibility::List,
-        1_000,
-        TruncationPolicyConfig::bytes(10_000),
-    );
-    let specific = test_remote_model_with_policy(
-        "gpt-5.3-codex",
-        ModelVisibility::List,
-        1_000,
-        TruncationPolicyConfig::bytes(10_000),
-    );
-    let specific = ModelInfo {
-        display_name: "GPT 5.3 Codex".to_string(),
-        base_instructions: "use specific prefix".to_string(),
-        ..specific
-    };
-    let generic = ModelInfo {
-        display_name: "GPT 5.3".to_string(),
-        base_instructions: "use generic prefix".to_string(),
-        ..generic
-    };
-    mount_models_once(
-        &server,
-        ModelsResponse {
-            models: vec![generic.clone(), specific.clone()],
-        },
-    )
-    .await;
-
-    let codex_home = TempDir::new()?;
-    let mut config = load_default_config_for_test(&codex_home).await;
-    config.features.enable(Feature::RemoteModels);
-    let auth = CodexAuth::create_dummy_chatgpt_auth_for_testing();
-    let provider = ModelProviderInfo {
-        base_url: Some(format!("{}/v1", server.uri())),
-        ..built_in_model_providers()["openai"].clone()
-    };
-    let manager = codex_core::test_support::models_manager_with_provider(
-        codex_home.path().to_path_buf(),
-        codex_core::test_support::auth_manager_from_auth(auth),
-        provider,
-    );
-    manager
-        .list_models(&config, RefreshStrategy::OnlineIfUncached)
-        .await;
-
-    let model_info = manager.get_model_info("gpt-5.3-codex-test", &config).await;
-    assert_eq!(model_info.slug, "gpt-5.3-codex-test");
-    assert_eq!(model_info.base_instructions, specific.base_instructions);
-
-    Ok(())
-}
-
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
-async fn remote_models_long_model_slug_is_sent_with_high_reasoning() -> Result<()> {
-    skip_if_no_network!(Ok(()));
-    skip_if_sandbox!(Ok(()));
-
-    let server = MockServer::start().await;
-    let requested_model = "gpt-5.3-codex-test";
-    let prefix_model = "gpt-5.3-codex";
-    let mut remote_model = test_remote_model_with_policy(
-        prefix_model,
-        ModelVisibility::List,
-        1_000,
-        TruncationPolicyConfig::bytes(10_000),
-    );
-    remote_model.default_reasoning_level = Some(ReasoningEffort::High);
-    remote_model.supported_reasoning_levels = vec![
-        ReasoningEffortPreset {
-            effort: ReasoningEffort::Medium,
-            description: ReasoningEffort::Medium.to_string(),
-        },
-        ReasoningEffortPreset {
-            effort: ReasoningEffort::High,
-            description: ReasoningEffort::High.to_string(),
-        },
-    ];
-    remote_model.supports_reasoning_summaries = true;
-    mount_models_once(
-        &server,
-        ModelsResponse {
-            models: vec![remote_model],
-        },
-    )
-    .await;
-    let response_mock = mount_sse_once(
-        &server,
-        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-    )
-    .await;
-
-    let TestCodex {
-        codex, cwd, config, ..
-    } = test_codex()
-        .with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
-        .with_config(|config| {
-            config.features.enable(Feature::RemoteModels);
-            config.model = Some(requested_model.to_string());
-        })
-        .build(&server)
-        .await?;
-
-    codex
-        .submit(Op::UserTurn {
-            items: vec![UserInput::Text {
-                text: "check model slug".into(),
-                text_elements: Vec::new(),
-            }],
-            final_output_json_schema: None,
-            cwd: cwd.path().to_path_buf(),
-            approval_policy: config.approval_policy.value(),
-            sandbox_policy: config.sandbox_policy.get().clone(),
-            model: requested_model.to_string(),
-            effort: None,
-            summary: config.model_reasoning_summary,
-            collaboration_mode: None,
-            personality: None,
-        })
-        .await?;
-    wait_for_event(&codex, |event| matches!(event, EventMsg::TurnComplete(_))).await;
-
-    let request = response_mock.single_request();
-    let body = request.body_json();
-    let reasoning_effort = body
-        .get("reasoning")
-        .and_then(|reasoning| reasoning.get("effort"))
-        .and_then(|value| value.as_str());
-    assert_eq!(body["model"].as_str(), Some(requested_model));
-    assert_eq!(reasoning_effort, Some("high"));
-
-    Ok(())
-}
-
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
     skip_if_no_network!(Ok(()));