ollama: default to Responses API for built-ins (#8798)

This is an alternate PR that solves the same problem as
<https://github.com/openai/codex/pull/8227>.

In this PR, when Ollama is used via `--oss` (or via `model_provider =
"ollama"`), we default it to the Responses wire API. At runtime we check
the Ollama server's version, and if it is older than the release that
added Responses support, we print a warning.
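
For reference, the gate reduces to a single version comparison. The sketch below is a condensed, self-contained restatement of the `wire_api_for_version` helper added in `codex-ollama` (see the diff further down); the 0.13.4 cutoff and the special-casing of the `0.0.0` version reported by dev builds both come from that code.

```rust
use semver::Version;

#[derive(Debug, PartialEq, Eq)]
enum WireApi {
    Responses,
    Chat,
}

// Condensed from `wire_api_for_version` in codex-ollama: the Responses API
// requires Ollama >= 0.13.4, and dev builds that report 0.0.0 are treated
// as new enough.
fn wire_api_for_version(version: &Version) -> WireApi {
    let min_responses_version = Version::new(0, 13, 4);
    if *version == Version::new(0, 0, 0) || *version >= min_responses_version {
        WireApi::Responses
    } else {
        WireApi::Chat
    }
}

fn main() {
    assert_eq!(wire_api_for_version(&Version::new(0, 13, 3)), WireApi::Chat);
    assert_eq!(wire_api_for_version(&Version::new(0, 14, 1)), WireApi::Responses);
}
```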

Because there is no way to configure the wire API for a built-in
provider, we temporarily add a new `oss_provider`/`model_provider`
called `"ollama-chat"` that forces the chat format.

Once the `"chat"` format is fully removed (see
<https://github.com/openai/codex/discussions/7782>), `ollama-chat` can
be removed as well

---------

Co-authored-by: Eric Traut <etraut@openai.com>
Co-authored-by: Michael Bolin <mbolin@openai.com>
Author: Devon Rifkin
Date: 2026-01-13 09:51:41 -08:00
Committed by: GitHub
Parent: 2d56519ecd
Commit: fe03320791
19 changed files with 274 additions and 14 deletions

codex-rs/Cargo.lock (generated)
View File

@@ -1599,7 +1599,9 @@ dependencies = [
"bytes",
"codex-core",
"futures",
"pretty_assertions",
"reqwest",
"semver",
"serde_json",
"tokio",
"tracing",

View File

@@ -193,6 +193,7 @@ serde_yaml = "0.9"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10"
semver = "1.0"
shlex = "1.3.0"
similar = "2.7.0"
socket2 = "0.6.1"

View File

@@ -1,18 +1,52 @@
//! OSS provider utilities shared between TUI and exec.
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::WireApi;
use codex_core::config::Config;
use codex_core::protocol::DeprecationNoticeEvent;
use std::io;
/// Returns the default model for a given OSS provider.
pub fn get_default_model_for_oss_provider(provider_id: &str) -> Option<&'static str> {
match provider_id {
LMSTUDIO_OSS_PROVIDER_ID => Some(codex_lmstudio::DEFAULT_OSS_MODEL),
OLLAMA_OSS_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => Some(codex_ollama::DEFAULT_OSS_MODEL),
_ => None,
}
}
/// Returns a deprecation notice if Ollama doesn't support the responses wire API.
pub async fn ollama_chat_deprecation_notice(
config: &Config,
) -> io::Result<Option<DeprecationNoticeEvent>> {
if config.model_provider_id != OLLAMA_OSS_PROVIDER_ID
|| config.model_provider.wire_api != WireApi::Responses
{
return Ok(None);
}
if let Some(detection) = codex_ollama::detect_wire_api(&config.model_provider).await?
&& detection.wire_api == WireApi::Chat
{
let version_suffix = detection
.version
.as_ref()
.map(|version| format!(" (version {version})"))
.unwrap_or_default();
let summary = format!(
"Your Ollama server{version_suffix} doesn't support the Responses API. Either update Ollama or set `oss_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"` (or `model_provider = \"{OLLAMA_CHAT_PROVIDER_ID}\"`) in your config.toml to use the \"chat\" wire API. Support for the \"chat\" wire API is deprecated and will soon be removed."
);
return Ok(Some(DeprecationNoticeEvent {
summary,
details: None,
}));
}
Ok(None)
}
/// Ensures the specified OSS provider is ready (models downloaded, service reachable).
pub async fn ensure_oss_provider_ready(
provider_id: &str,
@@ -24,7 +58,7 @@ pub async fn ensure_oss_provider_ready(
.await
.map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;
}
OLLAMA_OSS_PROVIDER_ID => {
OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
codex_ollama::ensure_oss_ready(config)
.await
.map_err(|e| std::io::Error::other(format!("OSS setup failed: {e}")))?;

View File

@@ -24,6 +24,7 @@ use crate::features::FeaturesToml;
use crate::git_info::resolve_root_git_project_for_trust;
use crate::model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
use crate::model_provider_info::OLLAMA_OSS_PROVIDER_ID;
use crate::model_provider_info::built_in_model_providers;
use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
@@ -648,14 +649,14 @@ pub fn set_project_trust_level(
pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::Result<()> {
// Validate that the provider is one of the known OSS providers
match provider {
LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID => {
LMSTUDIO_OSS_PROVIDER_ID | OLLAMA_OSS_PROVIDER_ID | OLLAMA_CHAT_PROVIDER_ID => {
// Valid provider, continue
}
_ => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
format!(
"Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}"
"Invalid OSS provider '{provider}'. Must be one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID}"
),
));
}
@@ -861,7 +862,7 @@ pub struct ConfigToml {
pub experimental_compact_prompt_file: Option<AbsolutePathBuf>,
pub experimental_use_unified_exec_tool: Option<bool>,
pub experimental_use_freeform_apply_patch: Option<bool>,
/// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama".
/// Preferred OSS provider for local models, e.g. "lmstudio", "ollama", or "ollama-chat".
pub oss_provider: Option<String>,
}

View File

@@ -57,6 +57,7 @@ pub use model_provider_info::DEFAULT_LMSTUDIO_PORT;
pub use model_provider_info::DEFAULT_OLLAMA_PORT;
pub use model_provider_info::LMSTUDIO_OSS_PROVIDER_ID;
pub use model_provider_info::ModelProviderInfo;
pub use model_provider_info::OLLAMA_CHAT_PROVIDER_ID;
pub use model_provider_info::OLLAMA_OSS_PROVIDER_ID;
pub use model_provider_info::WireApi;
pub use model_provider_info::built_in_model_providers;

View File

@@ -265,6 +265,7 @@ pub const DEFAULT_OLLAMA_PORT: u16 = 11434;
pub const LMSTUDIO_OSS_PROVIDER_ID: &str = "lmstudio";
pub const OLLAMA_OSS_PROVIDER_ID: &str = "ollama";
pub const OLLAMA_CHAT_PROVIDER_ID: &str = "ollama-chat";
/// Built-in default provider list.
pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
@@ -278,6 +279,10 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
("openai", P::create_openai_provider()),
(
OLLAMA_OSS_PROVIDER_ID,
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Responses),
),
(
OLLAMA_CHAT_PROVIDER_ID,
create_oss_provider(DEFAULT_OLLAMA_PORT, WireApi::Chat),
),
(

View File

@@ -28,7 +28,7 @@ pub struct Cli {
#[arg(long = "oss", default_value_t = false)]
pub oss: bool,
/// Specify which local provider to use (lmstudio or ollama).
/// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
/// If not specified with --oss, will use config default or show selection.
#[arg(long = "local-provider")]
pub oss_provider: Option<String>,

View File

@@ -15,9 +15,11 @@ pub use cli::Command;
pub use cli::ReviewArgs;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_common::oss::ollama_chat_deprecation_notice;
use codex_core::AuthManager;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::NewThread;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::ThreadManager;
use codex_core::auth::enforce_login_restrictions;
@@ -176,7 +178,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
Some(provider)
} else {
return Err(anyhow::anyhow!(
"No default OSS provider configured. Use --local-provider=provider or set oss_provider to either {LMSTUDIO_OSS_PROVIDER_ID} or {OLLAMA_OSS_PROVIDER_ID} in config.toml"
"No default OSS provider configured. Use --local-provider=provider or set oss_provider to one of: {LMSTUDIO_OSS_PROVIDER_ID}, {OLLAMA_OSS_PROVIDER_ID}, {OLLAMA_CHAT_PROVIDER_ID} in config.toml"
));
}
} else {
@@ -223,6 +225,14 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
std::process::exit(1);
}
let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
Ok(notice) => notice,
Err(err) => {
tracing::warn!(?err, "Failed to detect Ollama wire API");
None
}
};
let otel =
codex_core::otel_init::build_provider(&config, env!("CARGO_PKG_VERSION"), None, false);
@@ -253,6 +263,12 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
last_message_file.clone(),
)),
};
if let Some(notice) = ollama_chat_support_notice {
event_processor.process_event(Event {
id: String::new(),
msg: EventMsg::DeprecationNotice(notice),
});
}
if oss {
// We're in the oss section, so provider_id should be Some

View File

@@ -17,6 +17,7 @@ bytes = { workspace = true }
codex-core = { workspace = true }
futures = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
semver = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
"io-std",
@@ -30,3 +31,4 @@ wiremock = { workspace = true }
[dev-dependencies]
assert_matches = { workspace = true }
pretty_assertions = { workspace = true }

View File

@@ -1,6 +1,7 @@
use bytes::BytesMut;
use futures::StreamExt;
use futures::stream::BoxStream;
use semver::Version;
use serde_json::Value as JsonValue;
use std::collections::VecDeque;
use std::io;
@@ -53,7 +54,7 @@ impl OllamaClient {
}
/// Build a client from a provider definition and verify the server is reachable.
async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
pub(crate) async fn try_from_provider(provider: &ModelProviderInfo) -> io::Result<Self> {
#![expect(clippy::expect_used)]
let base_url = provider
.base_url
@@ -125,6 +126,32 @@ impl OllamaClient {
Ok(names)
}
/// Query the server for its version string, returning `None` when unavailable.
pub async fn fetch_version(&self) -> io::Result<Option<Version>> {
let version_url = format!("{}/api/version", self.host_root.trim_end_matches('/'));
let resp = self
.client
.get(version_url)
.send()
.await
.map_err(io::Error::other)?;
if !resp.status().is_success() {
return Ok(None);
}
let val = resp.json::<JsonValue>().await.map_err(io::Error::other)?;
let Some(version_str) = val.get("version").and_then(|v| v.as_str()).map(str::trim) else {
return Ok(None);
};
let normalized = version_str.trim_start_matches('v');
match Version::parse(normalized) {
Ok(version) => Ok(Some(version)),
Err(err) => {
tracing::warn!("Failed to parse Ollama version `{version_str}`: {err}");
Ok(None)
}
}
}
/// Start a model pull and emit streaming events. The returned stream ends when
/// a Success event is observed or the server closes the connection.
pub async fn pull_model_stream(
@@ -236,6 +263,7 @@ impl OllamaClient {
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
// Happy-path tests using a mock HTTP server; skip if sandbox network is disabled.
#[tokio::test]
@@ -269,6 +297,42 @@ mod tests {
assert!(models.contains(&"mistral".to_string()));
}
#[tokio::test]
async fn test_fetch_version() {
if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {
tracing::info!(
"{} is set; skipping test_fetch_version",
codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR
);
return;
}
let server = wiremock::MockServer::start().await;
wiremock::Mock::given(wiremock::matchers::method("GET"))
.and(wiremock::matchers::path("/api/tags"))
.respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
serde_json::json!({ "models": [] }).to_string(),
"application/json",
))
.mount(&server)
.await;
wiremock::Mock::given(wiremock::matchers::method("GET"))
.and(wiremock::matchers::path("/api/version"))
.respond_with(wiremock::ResponseTemplate::new(200).set_body_raw(
serde_json::json!({ "version": "0.14.1" }).to_string(),
"application/json",
))
.mount(&server)
.await;
let client = OllamaClient::try_from_provider_with_base_url(server.uri().as_str())
.await
.expect("client");
let version = client.fetch_version().await.expect("version fetch");
assert_eq!(version, Some(Version::new(0, 14, 1)));
}
#[tokio::test]
async fn test_probe_server_happy_path_openai_compat_and_native() {
if std::env::var(codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR).is_ok() {

View File

@@ -4,15 +4,23 @@ mod pull;
mod url;
pub use client::OllamaClient;
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_core::config::Config;
pub use pull::CliProgressReporter;
pub use pull::PullEvent;
pub use pull::PullProgressReporter;
pub use pull::TuiProgressReporter;
use semver::Version;
/// Default OSS model to use when `--oss` is passed without an explicit `-m`.
pub const DEFAULT_OSS_MODEL: &str = "gpt-oss:20b";
pub struct WireApiDetection {
pub wire_api: WireApi,
pub version: Option<Version>,
}
/// Prepare the local OSS environment when `--oss` is selected.
///
/// - Ensures a local Ollama server is reachable.
@@ -45,3 +53,65 @@ pub async fn ensure_oss_ready(config: &Config) -> std::io::Result<()> {
Ok(())
}
fn min_responses_version() -> Version {
Version::new(0, 13, 4)
}
fn wire_api_for_version(version: &Version) -> WireApi {
if *version == Version::new(0, 0, 0) || *version >= min_responses_version() {
WireApi::Responses
} else {
WireApi::Chat
}
}
/// Detect which wire API the running Ollama server supports based on its version.
/// Returns `Ok(None)` when the version endpoint is missing or unparsable; callers
/// should keep the configured default in that case.
pub async fn detect_wire_api(
provider: &ModelProviderInfo,
) -> std::io::Result<Option<WireApiDetection>> {
let client = crate::OllamaClient::try_from_provider(provider).await?;
let Some(version) = client.fetch_version().await? else {
return Ok(None);
};
let wire_api = wire_api_for_version(&version);
Ok(Some(WireApiDetection {
wire_api,
version: Some(version),
}))
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn test_wire_api_for_version_dev_zero_keeps_responses() {
assert_eq!(
wire_api_for_version(&Version::new(0, 0, 0)),
WireApi::Responses
);
}
#[test]
fn test_wire_api_for_version_before_cutoff_is_chat() {
assert_eq!(wire_api_for_version(&Version::new(0, 13, 3)), WireApi::Chat);
}
#[test]
fn test_wire_api_for_version_at_or_after_cutoff_is_responses() {
assert_eq!(
wire_api_for_version(&Version::new(0, 13, 4)),
WireApi::Responses
);
assert_eq!(
wire_api_for_version(&Version::new(0, 14, 0)),
WireApi::Responses
);
}
}

View File

@@ -35,6 +35,7 @@ use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::DeprecationNoticeEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
@@ -121,6 +122,15 @@ fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorI
}
}
fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option<DeprecationNoticeEvent>) {
let Some(DeprecationNoticeEvent { summary, details }) = notice else {
return;
};
app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
crate::history_cell::new_deprecation_notice(summary, details),
)));
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct SessionSummary {
usage_line: String,
@@ -345,10 +355,12 @@ impl App {
session_selection: SessionSelection,
feedback: codex_feedback::CodexFeedback,
is_first_run: bool,
ollama_chat_support_notice: Option<DeprecationNoticeEvent>,
) -> Result<AppExitInfo> {
use tokio_stream::StreamExt;
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
emit_deprecation_notice(&app_event_tx, ollama_chat_support_notice);
let thread_manager = Arc::new(ThreadManager::new(
config.codex_home.clone(),

View File

@@ -58,7 +58,7 @@ pub struct Cli {
#[arg(long = "oss", default_value_t = false)]
pub oss: bool,
/// Specify which local provider to use (lmstudio or ollama).
/// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
/// If not specified with --oss, will use config default or show selection.
#[arg(long = "local-provider")]
pub oss_provider: Option<String>,

View File

@@ -9,6 +9,7 @@ pub use app::AppExitInfo;
use codex_app_server_protocol::AuthMode;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_common::oss::ollama_chat_deprecation_notice;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::INTERACTIVE_SESSION_SOURCES;
@@ -431,6 +432,13 @@ async fn run_ratatui_app(
initial_config
};
let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
Ok(notice) => notice,
Err(err) => {
tracing::warn!(?err, "Failed to detect Ollama wire API");
None
}
};
let mut missing_session_exit = |id_str: &str, action: &str| {
error!("Error finding conversation path: {id_str}");
restore();
@@ -566,6 +574,7 @@ async fn run_ratatui_app(
session_selection,
feedback,
should_show_trust_screen, // Proxy to: is it a first run in this directory?
ollama_chat_support_notice,
)
.await;

View File

@@ -4,6 +4,7 @@ use std::sync::LazyLock;
use codex_core::DEFAULT_LMSTUDIO_PORT;
use codex_core::DEFAULT_OLLAMA_PORT;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::config::set_default_oss_provider;
use crossterm::event::Event;
@@ -70,10 +71,16 @@ static OSS_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
},
SelectOption {
label: Line::from(vec!["O".underlined(), "llama".into()]),
description: "Local Ollama server (default port 11434)",
description: "Local Ollama server (Responses API, default port 11434)",
key: KeyCode::Char('o'),
provider_id: OLLAMA_OSS_PROVIDER_ID,
},
SelectOption {
label: Line::from(vec!["Ollama (".into(), "c".underlined(), "hat)".into()]),
description: "Local Ollama server (chat wire API, default port 11434)",
key: KeyCode::Char('c'),
provider_id: OLLAMA_CHAT_PROVIDER_ID,
},
]
});
@@ -99,7 +106,11 @@ impl OssSelectionWidget<'_> {
status: lmstudio_status,
},
ProviderOption {
name: "Ollama".to_string(),
name: "Ollama (Responses)".to_string(),
status: ollama_status.clone(),
},
ProviderOption {
name: "Ollama (Chat)".to_string(),
status: ollama_status,
},
];

View File

@@ -52,6 +52,7 @@ use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::DeprecationNoticeEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::FinalOutput;
use codex_core::protocol::ListSkillsResponseEvent;
@@ -162,6 +163,15 @@ fn emit_skill_load_warnings(app_event_tx: &AppEventSender, errors: &[SkillErrorI
}
}
fn emit_deprecation_notice(app_event_tx: &AppEventSender, notice: Option<DeprecationNoticeEvent>) {
let Some(DeprecationNoticeEvent { summary, details }) = notice else {
return;
};
app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
crate::history_cell::new_deprecation_notice(summary, details),
)));
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct SessionSummary {
usage_line: String,
@@ -408,10 +418,12 @@ impl App {
session_selection: SessionSelection,
feedback: codex_feedback::CodexFeedback,
is_first_run: bool,
ollama_chat_support_notice: Option<DeprecationNoticeEvent>,
) -> Result<AppExitInfo> {
use tokio_stream::StreamExt;
let (app_event_tx, mut app_event_rx) = unbounded_channel();
let app_event_tx = AppEventSender::new(app_event_tx);
emit_deprecation_notice(&app_event_tx, ollama_chat_support_notice);
let thread_manager = Arc::new(ThreadManager::new(
config.codex_home.clone(),

View File

@@ -58,7 +58,7 @@ pub struct Cli {
#[arg(long = "oss", default_value_t = false)]
pub oss: bool,
/// Specify which local provider to use (lmstudio or ollama).
/// Specify which local provider to use (lmstudio, ollama, or ollama-chat).
/// If not specified with --oss, will use config default or show selection.
#[arg(long = "local-provider")]
pub oss_provider: Option<String>,

View File

@@ -9,6 +9,7 @@ pub use app::AppExitInfo;
use codex_app_server_protocol::AuthMode;
use codex_common::oss::ensure_oss_provider_ready;
use codex_common::oss::get_default_model_for_oss_provider;
use codex_common::oss::ollama_chat_deprecation_notice;
use codex_core::AuthManager;
use codex_core::CodexAuth;
use codex_core::INTERACTIVE_SESSION_SOURCES;
@@ -451,6 +452,13 @@ async fn run_ratatui_app(
initial_config
};
let ollama_chat_support_notice = match ollama_chat_deprecation_notice(&config).await {
Ok(notice) => notice,
Err(err) => {
tracing::warn!(?err, "Failed to detect Ollama wire API");
None
}
};
let mut missing_session_exit = |id_str: &str, action: &str| {
error!("Error finding conversation path: {id_str}");
restore();
@@ -614,6 +622,7 @@ async fn run_ratatui_app(
session_selection,
feedback,
should_show_trust_screen, // Proxy to: is it a first run in this directory?
ollama_chat_support_notice,
)
.await;

View File

@@ -4,6 +4,7 @@ use std::sync::LazyLock;
use codex_core::DEFAULT_LMSTUDIO_PORT;
use codex_core::DEFAULT_OLLAMA_PORT;
use codex_core::LMSTUDIO_OSS_PROVIDER_ID;
use codex_core::OLLAMA_CHAT_PROVIDER_ID;
use codex_core::OLLAMA_OSS_PROVIDER_ID;
use codex_core::config::set_default_oss_provider;
use crossterm::event::Event;
@@ -70,10 +71,16 @@ static OSS_SELECT_OPTIONS: LazyLock<Vec<SelectOption>> = LazyLock::new(|| {
},
SelectOption {
label: Line::from(vec!["O".underlined(), "llama".into()]),
description: "Local Ollama server (default port 11434)",
description: "Local Ollama server (Responses API, default port 11434)",
key: KeyCode::Char('o'),
provider_id: OLLAMA_OSS_PROVIDER_ID,
},
SelectOption {
label: Line::from(vec!["Ollama (".into(), "c".underlined(), "hat)".into()]),
description: "Local Ollama server (chat wire API, default port 11434)",
key: KeyCode::Char('c'),
provider_id: OLLAMA_CHAT_PROVIDER_ID,
},
]
});
@@ -99,7 +106,11 @@ impl OssSelectionWidget<'_> {
status: lmstudio_status,
},
ProviderOption {
name: "Ollama".to_string(),
name: "Ollama (Responses)".to_string(),
status: ollama_status.clone(),
},
ProviderOption {
name: "Ollama (Chat)".to_string(),
status: ollama_status,
},
];