Mirror of https://github.com/openai/codex.git, synced 2026-04-28 08:34:54 +00:00
# PR #1847: feat: add a built-in model provider named "oss"

- URL: https://github.com/openai/codex/pull/1847
- Author: bolinfest
- Created: 2025-08-05 09:13:36 UTC
- Updated: 2025-08-05 18:32:16 UTC
- Changes: +686/-44, Files changed: 13, Commits: 1

## Description

Builds off of the work in https://github.com/openai/codex/pull/1813, but ports only the business logic. Introduces a new built-in provider named `oss` rather than trying to write one to the user's `config.toml`.
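
For reference, the `config.toml` entry this avoids writing would look roughly like the sketch below. This is illustrative only: the key names mirror the `ModelProviderInfo` fields in the diff, and the `base_url` shown is the default the new provider computes when no `CODEX_OSS_*` override is set.

```toml
# Hypothetical user-defined equivalent of the new built-in "oss" provider.
[model_providers.oss]
name = "Open Source"
base_url = "http://localhost:11434/v1"  # default Ollama OpenAI-compat endpoint
wire_api = "chat"
```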
## Full Diff
```diff
diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock
index 2e20a7d624..eb630c6fe5 100644
--- a/codex-rs/Cargo.lock
+++ b/codex-rs/Cargo.lock
@@ -838,6 +838,23 @@ dependencies = [
  "wiremock",
 ]
 
+[[package]]
+name = "codex-ollama"
+version = "0.0.0"
+dependencies = [
+ "async-stream",
+ "bytes",
+ "codex-core",
+ "futures",
+ "reqwest",
+ "serde_json",
+ "tempfile",
+ "tokio",
+ "toml 0.9.4",
+ "tracing",
+ "wiremock",
+]
+
 [[package]]
 name = "codex-tui"
 version = "0.0.0"
@@ -852,6 +869,7 @@ dependencies = [
  "codex-core",
  "codex-file-search",
  "codex-login",
+ "codex-ollama",
  "color-eyre",
  "crossterm",
  "image",
diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml
index 0f8085c7e5..0ed8852228 100644
--- a/codex-rs/Cargo.toml
+++ b/codex-rs/Cargo.toml
@@ -14,6 +14,7 @@ members = [
     "mcp-client",
     "mcp-server",
     "mcp-types",
+    "ollama",
     "tui",
 ]
 resolver = "2"
diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs
index f9c608b554..965cb77bf1 100644
--- a/codex-rs/core/src/lib.rs
+++ b/codex-rs/core/src/lib.rs
@@ -28,6 +28,7 @@ mod mcp_connection_manager;
 mod mcp_tool_call;
 mod message_history;
 mod model_provider_info;
+pub use model_provider_info::BUILT_IN_OSS_MODEL_PROVIDER_ID;
 pub use model_provider_info::ModelProviderInfo;
 pub use model_provider_info::WireApi;
 pub use model_provider_info::built_in_model_providers;
diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs
index 49478660f4..595f05ef75 100644
--- a/codex-rs/core/src/model_provider_info.rs
+++ b/codex-rs/core/src/model_provider_info.rs
@@ -226,53 +226,93 @@ impl ModelProviderInfo {
     }
 }
 
+const DEFAULT_OLLAMA_PORT: u32 = 11434;
+
+pub const BUILT_IN_OSS_MODEL_PROVIDER_ID: &str = "oss";
+
 /// Built-in default provider list.
 pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
     use ModelProviderInfo as P;
 
-    // We do not want to be in the business of adjucating which third-party
-    // providers are bundled with Codex CLI, so we only include the OpenAI
-    // provider by default. Users are encouraged to add to `model_providers`
-    // in config.toml to add their own providers.
-    [(
-        "openai",
-        P {
-            name: "OpenAI".into(),
-            // Allow users to override the default OpenAI endpoint by
-            // exporting `OPENAI_BASE_URL`. This is useful when pointing
-            // Codex at a proxy, mock server, or Azure-style deployment
-            // without requiring a full TOML override for the built-in
-            // OpenAI provider.
-            base_url: std::env::var("OPENAI_BASE_URL")
+    // These CODEX_OSS_ environment variables are experimental: we may
+    // switch to reading values from config.toml instead.
+    let codex_oss_base_url = match std::env::var("CODEX_OSS_BASE_URL")
+        .ok()
+        .filter(|v| !v.trim().is_empty())
+    {
+        Some(url) => url,
+        None => format!(
+            "http://localhost:{port}/v1",
+            port = std::env::var("CODEX_OSS_PORT")
                 .ok()
-                .filter(|v| !v.trim().is_empty()),
-            env_key: None,
-            env_key_instructions: None,
-            wire_api: WireApi::Responses,
-            query_params: None,
-            http_headers: Some(
-                [("version".to_string(), env!("CARGO_PKG_VERSION").to_string())]
+                .filter(|v| !v.trim().is_empty())
+                .and_then(|v| v.parse::<u32>().ok())
+                .unwrap_or(DEFAULT_OLLAMA_PORT)
+        ),
+    };
+
+    // We do not want to be in the business of adjucating which third-party
+    // providers are bundled with Codex CLI, so we only include the OpenAI and
+    // open source ("oss") providers by default. Users are encouraged to add to
+    // `model_providers` in config.toml to add their own providers.
+    [
+        (
+            "openai",
+            P {
+                name: "OpenAI".into(),
+                // Allow users to override the default OpenAI endpoint by
+                // exporting `OPENAI_BASE_URL`. This is useful when pointing
+                // Codex at a proxy, mock server, or Azure-style deployment
+                // without requiring a full TOML override for the built-in
+                // OpenAI provider.
+                base_url: std::env::var("OPENAI_BASE_URL")
+                    .ok()
+                    .filter(|v| !v.trim().is_empty()),
+                env_key: None,
+                env_key_instructions: None,
+                wire_api: WireApi::Responses,
+                query_params: None,
+                http_headers: Some(
+                    [("version".to_string(), env!("CARGO_PKG_VERSION").to_string())]
+                        .into_iter()
+                        .collect(),
+                ),
+                env_http_headers: Some(
+                    [
+                        (
+                            "OpenAI-Organization".to_string(),
+                            "OPENAI_ORGANIZATION".to_string(),
+                        ),
+                        ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
+                    ]
                     .into_iter()
                     .collect(),
-            ),
-            env_http_headers: Some(
-                [
-                    (
-                        "OpenAI-Organization".to_string(),
-                        "OPENAI_ORGANIZATION".to_string(),
-                    ),
-                    ("OpenAI-Project".to_string(), "OPENAI_PROJECT".to_string()),
-                ]
-                .into_iter()
-                .collect(),
-            ),
-            // Use global defaults for retry/timeout unless overridden in config.toml.
-            request_max_retries: None,
-            stream_max_retries: None,
-            stream_idle_timeout_ms: None,
-            requires_auth: true,
-        },
-    )]
+                ),
+                // Use global defaults for retry/timeout unless overridden in config.toml.
+                request_max_retries: None,
+                stream_max_retries: None,
+                stream_idle_timeout_ms: None,
+                requires_auth: true,
+            },
+        ),
+        (
+            BUILT_IN_OSS_MODEL_PROVIDER_ID,
+            P {
+                name: "Open Source".into(),
+                base_url: Some(codex_oss_base_url),
+                env_key: None,
+                env_key_instructions: None,
+                wire_api: WireApi::Chat,
+                query_params: None,
+                http_headers: None,
+                env_http_headers: None,
+                request_max_retries: None,
+                stream_max_retries: None,
+                stream_idle_timeout_ms: None,
+                requires_auth: false,
+            },
+        ),
+    ]
     .into_iter()
     .map(|(k, v)| (k.to_string(), v))
     .collect()
diff --git a/codex-rs/ollama/Cargo.toml b/codex-rs/ollama/Cargo.toml
new file mode 100644
index 0000000000..ead9a06494
--- /dev/null
+++ b/codex-rs/ollama/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+edition = "2024"
+name = "codex-ollama"
+version = { workspace = true }
+
+[lib]
+name = "codex_ollama"
+path = "src/lib.rs"
+
+[lints]
+workspace = true
+
+[dependencies]
+async-stream = "0.3"
+bytes = "1.10.1"
+codex-core = { path = "../core" }
+futures = "0.3"
+reqwest = { version = "0.12", features = ["json", "stream"] }
+serde_json = "1"
+tokio = { version = "1", features = [
+    "io-std",
+    "macros",
+    "process",
+    "rt-multi-thread",
+    "signal",
+] }
+toml = "0.9.2"
+tracing = { version = "0.1.41", features = ["log"] }
+wiremock = "0.6"
+
+[dev-dependencies]
+tempfile = "3"
diff --git a/codex-rs/ollama/src/client.rs b/codex-rs/ollama/src/client.rs
new file mode 100644
index 0000000000..45190e8238
--- /dev/null
+++ b/codex-rs/ollama/src/client.rs
@@ -0,0 +1,255 @@
+use bytes::BytesMut;
+use futures::StreamExt;
+use futures::stream::BoxStream;
+use serde_json::Value as JsonValue;
+use std::collections::VecDeque;
+use std::io;
+
+use codex_core::WireApi;
+
+use crate::parser::pull_events_from_value;
+use crate::pull::PullEvent;
+use crate::pull::PullProgressReporter;
+use crate::url::base_url_to_host_root;
+use crate::url::is_openai_compatible_base_url;
+
+/// Client for interacting with a local Ollama instance.
+pub struct OllamaClient {
+    client: reqwest::Client,
+    host_root: String,
+    uses_openai_compat: bool,
+}
+
+impl OllamaClient {
+    pub fn from_oss_provider() -> Self {
+        #![allow(clippy::expect_used)]
+        // Use the built-in OSS provider's base URL.
+        let built_in_model_providers = codex_core::built_in_model_providers();
+        let provider = built_in_model_providers
+            .get(codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID)
+            .expect("oss provider must exist");
+        let base_url = provider
+            .base_url
+            .as_ref()
+            .expect("oss provider must have a base_url");
+        Self::from_provider(base_url, provider.wire_api)
+    }
+
+    /// Build a client from a provider definition. Falls back to the default
+    /// local URL if no base_url is configured.
+    fn from_provider(base_url: &str, wire_api: WireApi) -> Self {
+        let uses_openai_compat = is_openai_compatible_base_url(base_url)
+            || matches!(wire_api, WireApi::Chat) && is_openai_compatible_base_url(base_url);
+        let host_root = base_url_to_host_root(base_url);
+        let client = reqwest::Client::builder()
+            .connect_timeout(std::time::Duration::from_secs(5))
+            .build()
+            .unwrap_or_else(|_| reqwest::Client::new());
+        Self {
+            client,
+            host_root,
+            uses_openai_compat,
+        }
+    }
+
+    pub fn get_host(&self) -> &str {
+        &self.host_root
+    }
+
+    /// Low-level constructor given a raw host root, e.g. "http://localhost:11434".
+    #[cfg(test)]
+    fn from_host_root(host_root: impl Into<String>) -> Self {
+        let client = reqwest::Client::builder()
+            .connect_timeout(std::time::Duration::from_secs(5))
+            .build()
+            .unwrap_or_else(|_| reqwest::Client::new());
+        Self {
+            client,
+            host_root: host_root.into(),
+            uses_openai_compat: false,
+        }
+    }
+
+    /// Probe whether the server is reachable by hitting the appropriate health endpoint.
+    pub async fn probe_server(&self) -> io::Result<bool> {
+        let url = if self.uses_openai_compat {
+            format!("{}/v1/models", self.host_root.trim_end_matches('/'))
+        } else {
+            format!("{}/api/tags", self.host_root.trim_end_matches('/'))
+        };
+        let resp = self.client.get(url).send().await;
+        Ok(matches!(resp, Ok(r) if r.status().is_success()))
+    }
+
+    /// Return the list of model names known to the local Ollama instance.
+    pub async fn fetch_models(&self) -> io::Result<Vec<String>> {
+        let tags_url = format!("{}/api/tags", self.host_root.trim_end_matches('/'));
+        let resp = self
+            .client
+            .get(tags_url)
+            .send()
+            .await
+            .map_err(io::Error::other)?;
+        if !resp.status().is_success() {
+            return Ok(Vec::new());
+        }
+        let val = resp.json::<JsonValue>().await.map_err(io::Error::other)?;
+        let names = val
+            .get("models")
+            .and_then(|m| m.as_array())
+            .map(|arr| {
+                arr.iter()
+                    .filter_map(|v| v.get("name").and_then(|n| n.as_str()))
+                    .map(|s| s.to_string())
+                    .collect::<Vec<_>>()
+            })
+            .unwrap_or_default();
+        Ok(names)
+    }
+
+    /// Start a model pull and emit streaming events. The returned stream ends when
+    /// a Success event is observed or the server closes the connection.
+    pub async fn pull_model_stream(
+        &self,
+        model: &str,
+    ) -> io::Result<BoxStream<'static, PullEvent>> {
+        let url = format!("{}/api/pull", self.host_root.trim_end_matches('/'));
+        let resp = self
+            .client
+            .post(url)
+            .json(&serde_json::json!({"model": model, "stream": true}))
+            .send()
+            .await
+            .map_err(io::Error::other)?;
+        if !resp.status().is_success() {
+            return Err(io::Error::other(format!(
+                "failed to start pull: HTTP {}",
+                resp.status()
+            )));
+        }
+
+        let mut stream = resp.bytes_stream();
+        let mut buf = BytesMut::new();
+        let _pending: VecDeque<PullEvent> = VecDeque::new();
+
+        // Using an async stream adaptor backed by unfold-like manual loop.
+        let s = async_stream::stream! {
+            while let Some(chunk) = stream.next().await {
+                match chunk {
+                    Ok(bytes) => {
+                        buf.extend_from_slice(&bytes);
+                        while let Some(pos) = buf.iter().position(|b| *b == b'\n') {
+                            let line = buf.split_to(pos + 1);
+                            if let Ok(text) = std::str::from_utf8(&line) {
+                                let text = text.trim();
+                                if text.is_empty() { continue; }
+                                if let Ok(value) = serde_json::from_str::<JsonValue>(text) {
+                                    for ev in pull_events_from_value(&value) { yield ev; }
+                                    if let Some(err_msg) = value.get("error").and_then(|e| e.as_str()) {
+                                        yield PullEvent::Status(format!("error: {err_msg}"));
+                                        return;
+                                    }
+                                    if let Some(status) = value.get("status").and_then(|s| s.as_str()) {
+                                        if status == "success" { yield PullEvent::Success; return; }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    Err(_) => {
+                        // Connection error: end the stream.
+                        return;
+                    }
+                }
+            }
+        };
+
+        Ok(Box::pin(s))
+    }
+
+    /// High-level helper to pull a model and drive a progress reporter.
+    pub async fn pull_with_reporter(
+        &self,
+        model: &str,
+        reporter: &mut dyn PullProgressReporter,
+    ) -> io::Result<()> {
+        reporter.on_event(&PullEvent::Status(format!("Pulling model {model}...")))?;
+        let mut stream = self.pull_model_stream(model).await?;
+        while let Some(event) = stream.next().await {
+            reporter.on_event(&event)?;
+            if matches!(event, PullEvent::Success) {
+                break;
+            }
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #![allow(clippy::expect_used, clippy::unwrap_used)]
+    use super::*;
+
+    // Happy-path tests using a mock HTTP server; skip if sandbox network is disabled.
+    #[tokio::test]
+    async fn test_fetch_models_happy_path() {
+        if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
+            tracing::info!(
+                "{} is set; skipping test_fetch_models_happy_path",
+                codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
+            );
+            return;
+        }
+
+        let server = wiremock::MockServer::start().await;
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/tags"))
+            .respond_with(
+                wiremock::ResponseTemplate::new(200).set_body_raw(
+                    serde_json::json!({
+                        "models": [ {"name": "llama3.2:3b"}, {"name":"mistral"} ]
+                    })
+                    .to_string(),
+                    "application/json",
+                ),
+            )
+            .mount(&server)
+            .await;
+
+        let client = OllamaClient::from_host_root(server.uri());
+        let models = client.fetch_models().await.expect("fetch models");
+        assert!(models.contains(&"llama3.2:3b".to_string()));
+        assert!(models.contains(&"mistral".to_string()));
+    }
+
+    #[tokio::test]
+    async fn test_probe_server_happy_path_openai_compat_and_native() {
+        if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
+            tracing::info!(
+                "{} set; skipping test_probe_server_happy_path_openai_compat_and_native",
+                codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
+            );
+            return;
+        }
+
+        let server = wiremock::MockServer::start().await;
+
+        // Native endpoint
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/api/tags"))
+            .respond_with(wiremock::ResponseTemplate::new(200))
+            .mount(&server)
+            .await;
+        let native = OllamaClient::from_host_root(server.uri());
+        assert!(native.probe_server().await.expect("probe native"));
+
+        // OpenAI compatibility endpoint
+        wiremock::Mock::given(wiremock::matchers::method("GET"))
+            .and(wiremock::matchers::path("/v1/models"))
+            .respond_with(wiremock::ResponseTemplate::new(200))
+            .mount(&server)
+            .await;
+        let ollama_client = OllamaClient::from_provider(&server.uri(), WireApi::Chat);
+        assert!(ollama_client.probe_server().await.expect("probe compat"));
+    }
+}
diff --git a/codex-rs/ollama/src/lib.rs b/codex-rs/ollama/src/lib.rs
new file mode 100644
index 0000000000..671e02a01e
--- /dev/null
+++ b/codex-rs/ollama/src/lib.rs
@@ -0,0 +1,6 @@
+mod client;
+mod parser;
+mod pull;
+mod url;
+
+pub use client::OllamaClient;
diff --git a/codex-rs/ollama/src/parser.rs b/codex-rs/ollama/src/parser.rs
new file mode 100644
index 0000000000..b3ed2ca8c3
--- /dev/null
+++ b/codex-rs/ollama/src/parser.rs
@@ -0,0 +1,82 @@
+use serde_json::Value as JsonValue;
+
+use crate::pull::PullEvent;
+
+// Convert a single JSON object representing a pull update into one or more events.
+pub(crate) fn pull_events_from_value(value: &JsonValue) -> Vec<PullEvent> {
+    let mut events = Vec::new();
+    if let Some(status) = value.get("status").and_then(|s| s.as_str()) {
+        events.push(PullEvent::Status(status.to_string()));
+        if status == "success" {
+            events.push(PullEvent::Success);
+        }
+    }
+    let digest = value
+        .get("digest")
+        .and_then(|d| d.as_str())
+        .unwrap_or("")
+        .to_string();
+    let total = value.get("total").and_then(|t| t.as_u64());
+    let completed = value.get("completed").and_then(|t| t.as_u64());
+    if total.is_some() || completed.is_some() {
+        events.push(PullEvent::ChunkProgress {
+            digest,
+            total,
+            completed,
+        });
+    }
+    events
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_pull_events_decoder_status_and_success() {
+        let v: JsonValue = serde_json::json!({"status":"verifying"});
+        let events = pull_events_from_value(&v);
+        assert!(matches!(events.as_slice(), [PullEvent::Status(s)] if s == "verifying"));
+
+        let v2: JsonValue = serde_json::json!({"status":"success"});
+        let events2 = pull_events_from_value(&v2);
+        assert_eq!(events2.len(), 2);
+        assert!(matches!(events2[0], PullEvent::Status(ref s) if s == "success"));
+        assert!(matches!(events2[1], PullEvent::Success));
+    }
+
+    #[test]
+    fn test_pull_events_decoder_progress() {
+        let v: JsonValue = serde_json::json!({"digest":"sha256:abc","total":100});
+        let events = pull_events_from_value(&v);
+        assert_eq!(events.len(), 1);
+        match &events[0] {
+            PullEvent::ChunkProgress {
+                digest,
+                total,
+                completed,
+            } => {
+                assert_eq!(digest, "sha256:abc");
+                assert_eq!(*total, Some(100));
+                assert_eq!(*completed, None);
+            }
+            _ => panic!("expected ChunkProgress"),
+        }
+
+        let v2: JsonValue = serde_json::json!({"digest":"sha256:def","completed":42});
+        let events2 = pull_events_from_value(&v2);
+        assert_eq!(events2.len(), 1);
+        match &events2[0] {
+            PullEvent::ChunkProgress {
+                digest,
+                total,
+                completed,
+            } => {
+                assert_eq!(digest, "sha256:def");
+                assert_eq!(*total, None);
+                assert_eq!(*completed, Some(42));
+            }
+            _ => panic!("expected ChunkProgress"),
+        }
+    }
+}
diff --git a/codex-rs/ollama/src/pull.rs b/codex-rs/ollama/src/pull.rs
new file mode 100644
index 0000000000..aebca698eb
--- /dev/null
+++ b/codex-rs/ollama/src/pull.rs
@@ -0,0 +1,139 @@
+use std::collections::HashMap;
+use std::io;
+use std::io::Write;
+
+/// Events emitted while pulling a model from Ollama.
+#[derive(Debug, Clone)]
+pub enum PullEvent {
+    /// A human-readable status message (e.g., "verifying", "writing").
+    Status(String),
+    /// Byte-level progress update for a specific layer digest.
+    ChunkProgress {
+        digest: String,
+        total: Option<u64>,
+        completed: Option<u64>,
+    },
+    /// The pull finished successfully.
+    Success,
+}
+
+/// A simple observer for pull progress events. Implementations decide how to
+/// render progress (CLI, TUI, logs, ...).
+pub trait PullProgressReporter {
+    fn on_event(&mut self, event: &PullEvent) -> io::Result<()>;
+}
+
+/// A minimal CLI reporter that writes inline progress to stderr.
+pub struct CliProgressReporter {
+    printed_header: bool,
+    last_line_len: usize,
+    last_completed_sum: u64,
+    last_instant: std::time::Instant,
+    totals_by_digest: HashMap<String, (u64, u64)>,
+}
+
+impl Default for CliProgressReporter {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl CliProgressReporter {
+    pub fn new() -> Self {
+        Self {
+            printed_header: false,
+            last_line_len: 0,
+            last_completed_sum: 0,
+            last_instant: std::time::Instant::now(),
+            totals_by_digest: HashMap::new(),
+        }
+    }
+}
+
+impl PullProgressReporter for CliProgressReporter {
+    fn on_event(&mut self, event: &PullEvent) -> io::Result<()> {
+        let mut out = std::io::stderr();
+        match event {
+            PullEvent::Status(status) => {
+                // Avoid noisy manifest messages; otherwise show status inline.
+                if status.eq_ignore_ascii_case("pulling manifest") {
+                    return Ok(());
+                }
+                let pad = self.last_line_len.saturating_sub(status.len());
+                let line = format!("\r{status}{}", " ".repeat(pad));
+                self.last_line_len = status.len();
+                out.write_all(line.as_bytes())?;
+                out.flush()
+            }
+            PullEvent::ChunkProgress {
+                digest,
+                total,
+                completed,
+            } => {
+                if let Some(t) = *total {
+                    self.totals_by_digest
+                        .entry(digest.clone())
+                        .or_insert((0, 0))
+                        .0 = t;
+                }
+                if let Some(c) = *completed {
+                    self.totals_by_digest
+                        .entry(digest.clone())
+                        .or_insert((0, 0))
+                        .1 = c;
+                }
+
+                let (sum_total, sum_completed) = self
+                    .totals_by_digest
+                    .values()
+                    .fold((0u64, 0u64), |acc, (t, c)| (acc.0 + *t, acc.1 + *c));
+                if sum_total > 0 {
+                    if !self.printed_header {
+                        let gb = (sum_total as f64) / (1024.0 * 1024.0 * 1024.0);
+                        let header = format!("Downloading model: total {gb:.2} GB\n");
+                        out.write_all(b"\r\x1b[2K")?;
+                        out.write_all(header.as_bytes())?;
+                        self.printed_header = true;
+                    }
+                    let now = std::time::Instant::now();
+                    let dt = now
+                        .duration_since(self.last_instant)
+                        .as_secs_f64()
+                        .max(0.001);
+                    let dbytes = sum_completed.saturating_sub(self.last_completed_sum) as f64;
+                    let speed_mb_s = dbytes / (1024.0 * 1024.0) / dt;
+                    self.last_completed_sum = sum_completed;
+                    self.last_instant = now;
+
+                    let done_gb = (sum_completed as f64) / (1024.0 * 1024.0 * 1024.0);
+                    let total_gb = (sum_total as f64) / (1024.0 * 1024.0 * 1024.0);
+                    let pct = (sum_completed as f64) * 100.0 / (sum_total as f64);
+                    let text =
+                        format!("{done_gb:.2}/{total_gb:.2} GB ({pct:.1}%) {speed_mb_s:.1} MB/s");
+                    let pad = self.last_line_len.saturating_sub(text.len());
+                    let line = format!("\r{text}{}", " ".repeat(pad));
+                    self.last_line_len = text.len();
+                    out.write_all(line.as_bytes())?;
+                    out.flush()
+                } else {
+                    Ok(())
+                }
+            }
+            PullEvent::Success => {
+                out.write_all(b"\n")?;
+                out.flush()
+            }
+        }
+    }
+}
+
+/// For now the TUI reporter delegates to the CLI reporter. This keeps UI and
+/// CLI behavior aligned until a dedicated TUI integration is implemented.
+#[derive(Default)]
+pub struct TuiProgressReporter(CliProgressReporter);
+
+impl PullProgressReporter for TuiProgressReporter {
+    fn on_event(&mut self, event: &PullEvent) -> io::Result<()> {
+        self.0.on_event(event)
+    }
+}
diff --git a/codex-rs/ollama/src/url.rs b/codex-rs/ollama/src/url.rs
new file mode 100644
index 0000000000..7c143ce426
--- /dev/null
+++ b/codex-rs/ollama/src/url.rs
@@ -0,0 +1,39 @@
+/// Identify whether a base_url points at an OpenAI-compatible root (".../v1").
+pub(crate) fn is_openai_compatible_base_url(base_url: &str) -> bool {
+    base_url.trim_end_matches('/').ends_with("/v1")
+}
+
+/// Convert a provider base_url into the native Ollama host root.
+/// For example, "http://localhost:11434/v1" -> "http://localhost:11434".
+pub fn base_url_to_host_root(base_url: &str) -> String {
+    let trimmed = base_url.trim_end_matches('/');
+    if trimmed.ends_with("/v1") {
+        trimmed
+            .trim_end_matches("/v1")
+            .trim_end_matches('/')
+            .to_string()
+    } else {
+        trimmed.to_string()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_base_url_to_host_root() {
+        assert_eq!(
+            base_url_to_host_root("http://localhost:11434/v1"),
+            "http://localhost:11434"
+        );
+        assert_eq!(
+            base_url_to_host_root("http://localhost:11434"),
+            "http://localhost:11434"
+        );
+        assert_eq!(
+            base_url_to_host_root("http://localhost:11434/"),
+            "http://localhost:11434"
+        );
+    }
+}
diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml
index 60af056a2d..254373e156 100644
--- a/codex-rs/tui/Cargo.toml
+++ b/codex-rs/tui/Cargo.toml
@@ -33,6 +33,7 @@ codex-common = { path = "../common", features = [
 codex-core = { path = "../core" }
 codex-file-search = { path = "../file-search" }
 codex-login = { path = "../login" }
+codex-ollama = { path = "../ollama" }
 color-eyre = "0.6.3"
 crossterm = { version = "0.28.1", features = ["bracketed-paste"] }
 image = { version = "^0.25.6", default-features = false, features = ["jpeg"] }
@@ -70,11 +71,9 @@ unicode-segmentation = "1.12.0"
 unicode-width = "0.1"
 uuid = "1"
 
-
-
 [dev-dependencies]
+chrono = { version = "0.4", features = ["serde"] }
 insta = "1.43.1"
 pretty_assertions = "1"
 rand = "0.8"
-chrono = { version = "0.4", features = ["serde"] }
 vt100 = "0.16.2"
diff --git a/codex-rs/tui/src/cli.rs b/codex-rs/tui/src/cli.rs
index cb1b725a64..85dffbebb3 100644
--- a/codex-rs/tui/src/cli.rs
+++ b/codex-rs/tui/src/cli.rs
@@ -17,6 +17,12 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
+    /// Convenience flag to select the local open source model provider.
+    /// Equivalent to -c model_provider=oss; verifies a local Ollama server is
+    /// running.
+    #[arg(long = "oss", default_value_t = false)]
+    pub oss: bool,
+
     /// Configuration profile from config.toml to specify default options.
     #[arg(long = "profile", short = 'p')]
     pub config_profile: Option<String>,
diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs
index c619ce8ff0..64b75769bd 100644
--- a/codex-rs/tui/src/lib.rs
+++ b/codex-rs/tui/src/lib.rs
@@ -3,12 +3,14 @@
 // alternate‑screen mode starts; that file opts‑out locally via `allow`.
 #![deny(clippy::print_stdout, clippy::print_stderr)]
 use app::App;
+use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
 use codex_core::config::Config;
 use codex_core::config::ConfigOverrides;
 use codex_core::config_types::SandboxMode;
 use codex_core::protocol::AskForApproval;
 use codex_core::util::is_inside_git_repo;
 use codex_login::load_auth;
+use codex_ollama::OllamaClient;
 use log_layer::TuiLogLayer;
 use std::fs::OpenOptions;
 use std::io::Write;
@@ -70,6 +72,11 @@ pub async fn run_main(
         )
     };
 
+    let model_provider_override = if cli.oss {
+        Some(BUILT_IN_OSS_MODEL_PROVIDER_ID.to_owned())
+    } else {
+        None
+    };
     let config = {
         // Load configuration and support CLI overrides.
         let overrides = ConfigOverrides {
@@ -77,7 +84,7 @@ pub async fn run_main(
             approval_policy,
             sandbox_mode,
             cwd: cli.cwd.clone().map(|p| p.canonicalize().unwrap_or(p)),
-            model_provider: None,
+            model_provider: model_provider_override,
             config_profile: cli.config_profile.clone(),
             codex_linux_sandbox_exe,
             base_instructions: None,
@@ -177,6 +184,23 @@ pub async fn run_main(
         eprintln!("");
     }
 
+    if cli.oss {
+        // Should maybe load the client using `config.model_provider`?
+        let ollama_client = OllamaClient::from_oss_provider();
+        let is_ollama_available = ollama_client.probe_server().await?;
+        #[allow(clippy::print_stderr)]
+        if !is_ollama_available {
+            eprintln!(
+                "Ollama server is not reachable at {}. Please ensure Ollama is running.",
+                ollama_client.get_host()
+            );
+            std::process::exit(1);
+        }
+
+        // TODO(easong): Check if the model is available, and if not, prompt the
+        // user to pull it.
+    }
+
     let show_login_screen = should_show_login_screen(&config);
     if show_login_screen {
         std::io::stdout()
```
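
The new `--oss` flag is documented in the diff as shorthand for `-c model_provider=oss`. For users who want that behavior on every run, here is a sketch of the equivalent persistent configuration (assuming the existing top-level `config.toml` keys; the model name is only an example):

```toml
# Make the built-in "oss" provider the default instead of passing --oss.
model_provider = "oss"
model = "llama3.2:3b"  # example model served by a local Ollama instance
```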

## Review Comments

### codex-rs/tui/src/cli.rs

- Created: 2025-08-05 09:46:00 UTC | Link: https://github.com/openai/codex/pull/1847#discussion_r2253770695

```diff
@@ -17,6 +17,12 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
+    /// Convenience flag to select the local open source model provider.
+    /// Equivalent to -c model_provider=oss; verifies a local Ollama server is
```

> should really be a macro for `--profile oss` so you can have a default model for your `oss` profile that differs from the default model for your default profile
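
For reference, the suggested profile-based approach might look something like this in `config.toml` (a hypothetical sketch; the keys follow Codex's existing `profiles` config, and the model name is only an example):

```toml
[profiles.oss]
model_provider = "oss"
# A default model for this profile, independent of the default profile's model.
model = "llama3.2:3b"
```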

### codex-rs/tui/src/lib.rs

- Created: 2025-08-05 09:48:08 UTC | Link: https://github.com/openai/codex/pull/1847#discussion_r2253777890

```diff
@@ -177,6 +184,23 @@ pub async fn run_main(
         eprintln!("");
     }
 
+    if cli.oss {
```

> Also, we need a comparable change to `codex exec`, but it seemed easier to test it out here first.