Adding tests; linting

This commit is contained in:
pap
2025-08-04 18:40:58 +01:00
parent 304d01c099
commit c2cf4a3cb9
3 changed files with 143 additions and 10 deletions

View File

@@ -107,7 +107,7 @@ pub enum CodexErr {
// Ollama-specific errors
// ------------------------------
#[error(
"no running Ollama server detected; consider starting it or installing: https://github.com/ollama/ollama?tab=readme-ov-file#ollama"
"No running Ollama server detected. Start it with: `ollama serve` (after installing). Install instructions: https://github.com/ollama/ollama?tab=readme-ov-file#ollama"
)]
OllamaServerUnreachable,

View File

@@ -5,11 +5,15 @@ use serde_json::Value as JsonValue;
use std::collections::VecDeque;
use std::io;
use crate::model_provider_info::{ModelProviderInfo, WireApi};
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use super::DEFAULT_BASE_URL;
use super::PullEvent;
use super::PullProgressReporter;
use super::parser::pull_events_from_value;
use super::url::{base_url_to_host_root, is_openai_compatible_base_url};
use super::{DEFAULT_BASE_URL, PullEvent, PullProgressReporter};
use super::url::base_url_to_host_root;
use super::url::is_openai_compatible_base_url;
/// Client for interacting with a local Ollama instance.
pub struct OllamaClient {
@@ -160,3 +164,88 @@ impl OllamaClient {
Ok(())
}
}
#[cfg(test)]
mod tests {
    #![allow(clippy::expect_used, clippy::unwrap_used)]

    use super::*;
    use crate::model_provider_info::ModelProviderInfo;
    use crate::model_provider_info::WireApi;
    use wiremock::Mock;
    use wiremock::MockServer;
    use wiremock::ResponseTemplate;
    use wiremock::matchers::method;
    use wiremock::matchers::path;

    // Happy-path tests using a mock HTTP server; each test returns early
    // (with a log line) when the sandbox disables networking.

    #[tokio::test]
    async fn test_fetch_models_happy_path() {
        if std::env::var(crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
            tracing::info!(
                "{} is set; skipping test_fetch_models_happy_path",
                crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
            );
            return;
        }

        // Serve a canned `/api/tags` payload listing two models.
        let mock_server = MockServer::start().await;
        let tags_body = serde_json::json!({
            "models": [ {"name": "llama3.2:3b"}, {"name":"mistral"} ]
        })
        .to_string();
        Mock::given(method("GET"))
            .and(path("/api/tags"))
            .respond_with(ResponseTemplate::new(200).set_body_raw(tags_body, "application/json"))
            .mount(&mock_server)
            .await;

        let client = OllamaClient::from_host_root(mock_server.uri());
        let models = client.fetch_models().await.expect("fetch models");
        assert!(models.contains(&"llama3.2:3b".to_string()));
        assert!(models.contains(&"mistral".to_string()));
    }

    #[tokio::test]
    async fn test_probe_server_happy_path_openai_compat_and_native() {
        if std::env::var(crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
            tracing::info!(
                "{} set; skipping test_probe_server_happy_path_openai_compat_and_native",
                crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
            );
            return;
        }

        let mock_server = MockServer::start().await;

        // Native endpoint
        Mock::given(method("GET"))
            .and(path("/api/tags"))
            .respond_with(ResponseTemplate::new(200))
            .mount(&mock_server)
            .await;
        let native = OllamaClient::from_host_root(mock_server.uri());
        assert!(native.probe_server().await.expect("probe native"));

        // OpenAI compatibility endpoint
        Mock::given(method("GET"))
            .and(path("/v1/models"))
            .respond_with(ResponseTemplate::new(200))
            .mount(&mock_server)
            .await;
        let provider = ModelProviderInfo {
            name: "Ollama".to_string(),
            base_url: Some(format!("{}/v1", mock_server.uri())),
            env_key: None,
            env_key_instructions: None,
            wire_api: WireApi::Chat,
            query_params: None,
            http_headers: None,
            env_http_headers: None,
            request_max_retries: None,
            stream_max_retries: None,
            stream_idle_timeout_ms: None,
            requires_auth: false,
        };
        let compat = OllamaClient::from_provider(&provider);
        assert!(compat.probe_server().await.expect("probe compat"));
    }
}

View File

@@ -16,10 +16,13 @@ pub mod parser;
pub mod url;
pub use client::OllamaClient;
pub use config::{read_config_models, read_provider_state, write_config_models};
pub use url::{
base_url_to_host_root, base_url_to_host_root_with_wire, probe_ollama_server, probe_url_for_base,
};
pub use config::read_config_models;
pub use config::read_provider_state;
pub use config::write_config_models;
pub use url::base_url_to_host_root;
pub use url::base_url_to_host_root_with_wire;
pub use url::probe_ollama_server;
pub use url::probe_url_for_base;
/// Coordinator wrapper used by frontends when responding to `--ollama`.
///
/// - Probes the server using the configured base_url when present, otherwise
@@ -50,8 +53,10 @@ pub async fn ensure_configured_and_running() -> CoreResult<()> {
Err(_) => DEFAULT_BASE_URL.to_string(),
};
// Probe reachability.
let ok = url::probe_ollama_server(&base_url).await?;
// Probe reachability; map any probe error to a friendly unreachable message.
let ok: bool = url::probe_ollama_server(&base_url)
.await
.unwrap_or_default();
if !ok {
return Err(CodexErr::OllamaServerUnreachable);
}
@@ -61,6 +66,45 @@ pub async fn ensure_configured_and_running() -> CoreResult<()> {
Ok(())
}
#[cfg(test)]
mod ensure_tests {
    #![allow(clippy::expect_used, clippy::unwrap_used)]
    use super::*;

    /// `ensure_configured_and_running` should surface an unreachable Ollama
    /// server as the dedicated `CodexErr::OllamaServerUnreachable` variant
    /// rather than a raw transport/probe error.
    #[tokio::test]
    async fn test_ensure_configured_returns_friendly_error_when_unreachable() {
        // Skip in CI sandbox environments without network to avoid false negatives.
        if std::env::var(crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
            tracing::info!(
                "{} is set; skipping test_ensure_configured_returns_friendly_error_when_unreachable",
                crate::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
            );
            return;
        }

        // Write a config that points the provider at a port where no server
        // is expected to be listening, so the reachability probe fails fast.
        // Note: `TempDir::new` already creates the directory, so the previous
        // `create_dir_all(tmpdir.path())` call was redundant and is removed.
        let tmpdir = tempfile::TempDir::new().expect("tempdir");
        let config_path = tmpdir.path().join("config.toml");
        std::fs::write(
            &config_path,
            r#"[model_providers.ollama]
name = "Ollama"
base_url = "http://127.0.0.1:1/v1"
wire_api = "chat"
"#,
        )
        .unwrap();

        // SAFETY: mutating the process environment; assumes no other thread
        // reads or writes env vars concurrently while this test runs —
        // TODO(review): confirm the harness serializes env-mutating tests.
        unsafe {
            std::env::set_var("CODEX_HOME", tmpdir.path());
        }

        let err = ensure_configured_and_running()
            .await
            .expect_err("should report unreachable server as friendly error");
        assert!(matches!(err, CodexErr::OllamaServerUnreachable));
    }
}
/// Events emitted while pulling a model from Ollama.
#[derive(Debug, Clone)]
pub enum PullEvent {