jif-oai
2025-10-30 16:24:41 +00:00
parent 43f7733ad7
commit bae5341585
32 changed files with 236 additions and 160 deletions

codex-rs/Cargo.lock generated
View File

@@ -1063,6 +1063,7 @@ name = "codex-common"
version = "0.0.0"
dependencies = [
"clap",
"codex-api-client",
"codex-app-server-protocol",
"codex-core",
"codex-protocol",
@@ -1156,6 +1157,7 @@ dependencies = [
"anyhow",
"assert_cmd",
"clap",
"codex-api-client",
"codex-arg0",
"codex-common",
"codex-core",
@@ -1321,6 +1323,7 @@ dependencies = [
"assert_matches",
"async-stream",
"bytes",
"codex-api-client",
"codex-core",
"futures",
"reqwest",
@@ -1462,6 +1465,7 @@ dependencies = [
"chrono",
"clap",
"codex-ansi-escape",
"codex-api-client",
"codex-app-server-protocol",
"codex-arg0",
"codex-common",
@@ -1695,6 +1699,7 @@ version = "0.0.0"
dependencies = [
"anyhow",
"assert_cmd",
"codex-api-client",
"codex-core",
"codex-protocol",
"notify",

View File

@@ -26,6 +26,7 @@ pub use crate::prompt::Prompt;
pub use crate::responses::ResponsesApiClient;
pub use crate::responses::ResponsesApiClientConfig;
pub use crate::responses::stream_from_fixture;
pub use crate::stream::EventStream;
pub use crate::stream::Reasoning;
pub use crate::stream::ResponseEvent;
pub use crate::stream::ResponseStream;

View File

@@ -61,14 +61,23 @@ pub enum ResponseEvent {
RateLimits(RateLimitSnapshot),
}
pub struct ResponseStream {
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
#[derive(Debug)]
pub struct EventStream<T> {
pub(crate) rx_event: mpsc::Receiver<T>,
}
impl Stream for ResponseStream {
type Item = Result<ResponseEvent>;
impl<T> EventStream<T> {
pub fn from_receiver(rx_event: mpsc::Receiver<T>) -> Self {
Self { rx_event }
}
}
impl<T> Stream for EventStream<T> {
type Item = T;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.rx_event.poll_recv(cx)
}
}
pub type ResponseStream = EventStream<Result<ResponseEvent>>;
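Note: this hunk generalizes the old ResponseStream wrapper into EventStream<T>, so any tokio mpsc receiver can be exposed as a futures::Stream, with ResponseStream kept as a type alias. A minimal usage sketch; the u32 payload and the standalone main are illustrative, not from the commit:

use futures::StreamExt;
use tokio::sync::mpsc;

// Minimal sketch: EventStream turns any mpsc::Receiver<T> into a Stream.
// ResponseStream simply fixes T to Result<ResponseEvent>.
#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<u32>(8);
    let mut events = codex_api_client::EventStream::from_receiver(rx);

    tx.send(1).await.unwrap();
    drop(tx); // closing the last sender terminates the stream

    while let Some(event) = events.next().await {
        println!("received {event}");
    }
}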

View File

@@ -8,6 +8,7 @@ workspace = true
[dependencies]
clap = { workspace = true, features = ["derive", "wrap_help"], optional = true }
codex-api-client = { workspace = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
codex-app-server-protocol = { workspace = true }

View File

@@ -1,4 +1,4 @@
use codex_core::WireApi;
use codex_api_client::WireApi;
use codex_core::config::Config;
use crate::sandbox_summary::summarize_sandbox_policy;

View File

@@ -1,3 +1,4 @@
use std::fmt;
use std::sync::Arc;
use async_trait::async_trait;
@@ -17,20 +18,19 @@ use codex_protocol::ConversationId;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::protocol::SessionSource;
use futures::Stream;
use futures::StreamExt;
use futures::stream::BoxStream;
use tokio::sync::OnceCell;
use tokio::sync::mpsc;
use tracing::warn;
use crate::AuthManager;
use crate::ModelProviderInfo;
use crate::WireApi;
use crate::client_common::Prompt;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::client_common::create_reasoning_param_for_request;
use crate::client_common::create_text_param_for_request;
use crate::config::Config;
use crate::default_client::CodexHttpClient;
use crate::default_client::create_client;
use crate::error::CodexErr;
use crate::error::ConnectionFailedError;
@@ -43,25 +43,85 @@ use crate::features::Feature;
use crate::flags::CODEX_RS_SSE_FIXTURE;
use crate::model_family::ModelFamily;
use crate::openai_model_info::get_model_info;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct ModelClient {
config: Arc<Config>,
auth_manager: Option<Arc<AuthManager>>,
otel_event_manager: OtelEventManager,
client: CodexHttpClient,
http_client: reqwest::Client,
provider: ModelProviderInfo,
backend: Arc<OnceCell<ModelBackend>>,
conversation_id: ConversationId,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
session_source: SessionSource,
}
impl fmt::Debug for ModelClient {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ModelClient")
.field("provider", &self.provider.name)
.field("model", &self.config.model)
.field("conversation_id", &self.conversation_id)
.field("backend_initialized", &self.backend.get().is_some())
.finish()
}
}
#[derive(Debug, Clone)]
pub struct StreamPayload {
pub prompt: Prompt,
}
type ApiClientStream = BoxStream<'static, ApiClientResult<ResponseEvent>>;
enum ModelBackend {
Responses(ResponsesBackend),
Chat(ChatBackend),
}
impl ModelBackend {
async fn stream(&self, prompt: Prompt) -> ApiClientResult<ApiClientStream> {
match self {
ModelBackend::Responses(backend) => backend.stream(prompt).await,
ModelBackend::Chat(backend) => backend.stream(prompt).await,
}
}
}
struct ResponsesBackend {
client: ResponsesApiClient,
}
impl ResponsesBackend {
async fn stream(&self, prompt: Prompt) -> ApiClientResult<ApiClientStream> {
self.client
.stream(prompt)
.await
.map(|stream| stream.boxed())
}
}
struct ChatBackend {
client: ChatCompletionsApiClient,
show_reasoning: bool,
}
impl ChatBackend {
async fn stream(&self, prompt: Prompt) -> ApiClientResult<ApiClientStream> {
let stream = self.client.stream(prompt).await?;
let stream = if self.show_reasoning {
stream.streaming_mode().boxed()
} else {
stream.aggregate().boxed()
};
Ok(stream)
}
}
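Note: both backends erase their concrete stream types into a BoxStream (the ApiClientStream alias above), which is what lets ChatBackend choose between streaming_mode() and aggregate() at runtime. A standalone sketch of that erasure pattern; the string items and the filter standing in for aggregation are hypothetical:

use futures::StreamExt;
use futures::stream::{self, BoxStream};

// Hypothetical stand-in for ChatBackend::stream: two branches with
// different concrete stream types are unified by boxing them.
fn events(show_reasoning: bool) -> BoxStream<'static, String> {
    let raw = stream::iter(vec!["reasoning...".to_string(), "answer".to_string()]);
    if show_reasoning {
        raw.boxed() // streaming mode: forward every item as it arrives
    } else {
        raw.filter(|item| {
            let keep = item.as_str() == "answer";
            async move { keep }
        })
        .boxed() // aggregated mode: surface only the final output
    }
}

fn main() {
    // With show_reasoning = false only the final answer survives.
    let items = futures::executor::block_on(events(false).collect::<Vec<_>>());
    assert_eq!(items, vec!["answer".to_string()]);
}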
#[allow(clippy::too_many_arguments)]
impl ModelClient {
pub fn new(
@@ -74,14 +134,16 @@ impl ModelClient {
conversation_id: ConversationId,
session_source: SessionSource,
) -> Self {
let client = create_client();
let http_client = create_client().clone_inner();
let backend = Arc::new(OnceCell::new());
Self {
config,
auth_manager,
otel_event_manager,
client,
http_client,
provider,
backend,
conversation_id,
effort,
summary,
@@ -119,11 +181,30 @@ impl ModelClient {
pub async fn stream(&self, payload: &StreamPayload) -> Result<ResponseStream> {
let mut prompt = payload.prompt.clone();
self.populate_prompt(&mut prompt);
match self.provider.wire_api {
WireApi::Responses => self.stream_via_responses(prompt).await,
WireApi::Chat => self.stream_via_chat(prompt).await,
if self.provider.wire_api == WireApi::Responses {
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
warn!(path, "Streaming from fixture");
let stream = stream_from_fixture(
path,
self.provider.clone(),
self.otel_event_manager.clone(),
)
.await
.map_err(map_api_error)?
.boxed();
return Ok(wrap_stream(stream));
}
}
let backend = self
.backend
.get_or_try_init(|| async { self.build_backend().await })
.await
.map_err(map_api_error)?;
let api_stream = backend.stream(prompt).await.map_err(map_api_error)?;
Ok(wrap_stream(api_stream))
}
fn populate_prompt(&self, prompt: &mut Prompt) {
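Note: stream() now constructs the wire-specific backend at most once. get_or_try_init runs build_backend on the first call and every later call reuses the cached ModelBackend. A reduced sketch of that lazy-init pattern; the Backend type and the String error are placeholders:

use std::sync::Arc;
use tokio::sync::OnceCell;

#[derive(Debug)]
struct Backend;

// Placeholder for ModelClient::build_backend.
async fn build_backend() -> Result<Backend, String> {
    println!("constructing backend (runs once)");
    Ok(Backend)
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let cell: Arc<OnceCell<Backend>> = Arc::new(OnceCell::new());

    for _ in 0..3 {
        // First iteration awaits the initializer; the rest hit the cache.
        let backend = cell.get_or_try_init(build_backend).await?;
        println!("streaming with {backend:?}");
    }
    Ok(())
}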
@@ -154,22 +235,20 @@ impl ModelClient {
prompt.text_controls = create_text_param_for_request(verbosity, &prompt.output_schema);
}
async fn stream_via_responses(&self, prompt: Prompt) -> Result<ResponseStream> {
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
warn!(path, "Streaming from fixture");
let stream =
stream_from_fixture(path, self.provider.clone(), self.otel_event_manager.clone())
.await
.map_err(map_api_error)?;
return Ok(wrap_stream(stream));
async fn build_backend(&self) -> ApiClientResult<ModelBackend> {
match self.provider.wire_api {
WireApi::Responses => self.build_responses_backend().await,
WireApi::Chat => self.build_chat_backend().await,
}
}
async fn build_responses_backend(&self) -> ApiClientResult<ModelBackend> {
let auth_provider = self.auth_manager.as_ref().map(|manager| {
Arc::new(AuthManagerProvider::new(Arc::clone(manager))) as Arc<dyn AuthProvider>
});
let config = ResponsesApiClientConfig {
http_client: self.client.clone_inner(),
http_client: self.http_client.clone(),
provider: self.provider.clone(),
model: self.config.model.clone(),
conversation_id: self.conversation_id,
@@ -177,42 +256,30 @@ impl ModelClient {
otel_event_manager: self.otel_event_manager.clone(),
};
let client = ResponsesApiClient::new(config)
.await
.map_err(map_api_error)?;
let stream = client.stream(prompt).await.map_err(map_api_error)?;
Ok(wrap_stream(stream))
let client = ResponsesApiClient::new(config).await?;
Ok(ModelBackend::Responses(ResponsesBackend { client }))
}
async fn stream_via_chat(&self, prompt: Prompt) -> Result<ResponseStream> {
async fn build_chat_backend(&self) -> ApiClientResult<ModelBackend> {
let show_reasoning = self.config.show_raw_agent_reasoning;
let config = ChatCompletionsApiClientConfig {
http_client: self.client.clone_inner(),
http_client: self.http_client.clone(),
provider: self.provider.clone(),
model: self.config.model.clone(),
otel_event_manager: self.otel_event_manager.clone(),
session_source: self.session_source.clone(),
aggregation_mode: if self.config.show_raw_agent_reasoning {
aggregation_mode: if show_reasoning {
ChatAggregationMode::Streaming
} else {
ChatAggregationMode::AggregatedOnly
},
};
let client = ChatCompletionsApiClient::new(config)
.await
.map_err(map_api_error)?;
let stream = client.stream(prompt).await.map_err(map_api_error)?;
let stream = if self.config.show_raw_agent_reasoning {
stream.streaming_mode()
} else {
stream.aggregate()
};
Ok(wrap_stream(stream))
let client = ChatCompletionsApiClient::new(config).await?;
Ok(ModelBackend::Chat(ChatBackend {
client,
show_reasoning,
}))
}
pub async fn stream_for_test(&self, mut prompt: Prompt) -> Result<ResponseStream> {
@@ -304,15 +371,10 @@ impl AuthProvider for AuthManagerProvider {
}
}
fn wrap_stream<S>(stream: S) -> ResponseStream
where
S: Stream<Item = ApiClientResult<ResponseEvent>> + Send + Unpin + 'static,
{
fn wrap_stream(stream: ApiClientStream) -> ResponseStream {
let (tx, rx) = mpsc::channel::<Result<ResponseEvent>>(1600);
tokio::spawn(async move {
use futures::StreamExt;
let mut stream = stream;
while let Some(item) = stream.next().await {
let mapped = match item {
@@ -326,7 +388,7 @@ where
}
});
ResponseStream { rx_event: rx }
codex_api_client::EventStream::from_receiver(rx)
}
fn map_api_error(err: codex_api_client::Error) -> CodexErr {
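Note: wrap_stream bridges the crate boundary: a spawned task drains the boxed API stream, maps each codex_api_client error into a CodexErr, and feeds an mpsc channel whose receiver comes back as an EventStream. A simplified sketch of the bridge, assuming a generic item type and eliding the error mapping; it must run inside a tokio runtime:

use codex_api_client::EventStream;
use futures::StreamExt;
use futures::stream::BoxStream;
use tokio::sync::mpsc;

// Simplified bridge: forward a boxed stream into a channel-backed
// EventStream. The real wrap_stream also maps API errors to CodexErr.
fn bridge<T: Send + 'static>(mut stream: BoxStream<'static, T>) -> EventStream<T> {
    let (tx, rx) = mpsc::channel(1600); // capacity matches the diff above
    tokio::spawn(async move {
        while let Some(item) = stream.next().await {
            if tx.send(item).await.is_err() {
                break; // receiver was dropped; stop forwarding
            }
        }
    });
    EventStream::from_receiver(rx)
}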

View File

@@ -1,13 +1,8 @@
use std::borrow::Cow;
use std::ops::Deref;
use futures::Stream;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;
use crate::error::Result;
use codex_api_client::EventStream;
pub use codex_api_client::Prompt;
pub use codex_api_client::Reasoning;
pub use codex_api_client::TextControls;
@@ -84,17 +79,7 @@ pub fn create_text_param_for_request(
pub use codex_api_client::ResponseEvent;
pub struct ResponseStream {
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
}
impl Stream for ResponseStream {
type Item = Result<ResponseEvent>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.rx_event.poll_recv(cx)
}
}
pub type ResponseStream = EventStream<Result<ResponseEvent>>;
#[cfg(test)]
mod tests {

View File

@@ -51,7 +51,6 @@ use tracing::error;
use tracing::info;
use tracing::warn;
use crate::ModelProviderInfo;
use crate::client::ModelClient;
use crate::client::StreamPayload;
use crate::client_common::Prompt;
@@ -65,6 +64,7 @@ use crate::error::CodexErr;
use crate::error::Result as CodexResult;
#[cfg(test)]
use crate::exec::StreamOutput;
use codex_api_client::ModelProviderInfo;
// Removed: legacy executor wiring replaced by ToolOrchestrator flows.
// legacy normalize_exec_result no longer used after orchestrator migration
use crate::conversation_history::ResponsesApiChainState;
@@ -362,6 +362,12 @@ pub(crate) struct SessionSettingsUpdate {
pub(crate) final_output_json_schema: Option<Option<Value>>,
}
#[derive(Clone)]
struct PreparedPrompt {
prompt: Prompt,
full_prompt_items: Vec<ResponseItem>,
}
impl Session {
fn make_turn_context(
auth_manager: Option<Arc<AuthManager>>,
@@ -923,37 +929,51 @@ impl Session {
self.send_raw_response_items(turn_context, items).await;
}
async fn prepare_prompt_items(
&self,
turn_context: &TurnContext,
) -> (Vec<ResponseItem>, Vec<ResponseItem>, Option<String>, bool) {
async fn prepare_prompt(&self, turn_context: &TurnContext) -> PreparedPrompt {
let use_chain = turn_context.client.supports_responses_api_chaining();
let mut history = self.clone_history().await;
let full_prompt_items = history.get_history_for_prompt();
let mut prompt = Prompt::default();
prompt.store_response = use_chain;
if !use_chain {
let mut state = self.state.lock().await;
state.reset_responses_api_chain();
return (full_prompt_items.clone(), full_prompt_items, None, false);
{
let mut state = self.state.lock().await;
state.reset_responses_api_chain();
}
prompt.input = full_prompt_items.clone();
return PreparedPrompt {
prompt,
full_prompt_items,
};
}
let mut state = self.state.lock().await;
let mut previous_response_id = None;
let mut request_items = full_prompt_items.clone();
if let Some(chain) = state.responses_api_chain()
&& let Some(prev_id) = chain.last_response_id
{
let prefix = common_prefix_len(&chain.last_prompt_items, &full_prompt_items);
if prefix == 0 && !chain.last_prompt_items.is_empty() {
state.reset_responses_api_chain();
} else {
previous_response_id = Some(prev_id);
request_items = full_prompt_items[prefix..].to_vec();
let mut state = self.state.lock().await;
if let Some(chain) = state.responses_api_chain()
&& let Some(prev_id) = chain.last_response_id
{
let prefix = common_prefix_len(&chain.last_prompt_items, &full_prompt_items);
if prefix == 0 && !chain.last_prompt_items.is_empty() {
state.reset_responses_api_chain();
} else {
previous_response_id = Some(prev_id);
request_items = full_prompt_items[prefix..].to_vec();
}
}
}
(request_items, full_prompt_items, previous_response_id, true)
prompt.previous_response_id = previous_response_id;
prompt.input = request_items;
PreparedPrompt {
prompt,
full_prompt_items,
}
}
fn reconstruct_history_from_rollout(
@@ -1797,11 +1817,11 @@ pub(crate) async fn run_task(
// Construct the input that we will send to the model.
sess.record_conversation_items(&turn_context, &pending_input)
.await;
let (request_items, full_prompt_items, previous_response_id, store_response) =
sess.prepare_prompt_items(&turn_context).await;
let prepared_prompt = sess.prepare_prompt(&turn_context).await;
let turn_input_messages: Vec<String> = {
full_prompt_items
prepared_prompt
.full_prompt_items
.iter()
.filter_map(|item| match item {
ResponseItem::Message { content, .. } => Some(content),
@@ -1819,10 +1839,7 @@ pub(crate) async fn run_task(
Arc::clone(&sess),
Arc::clone(&turn_context),
Arc::clone(&turn_diff_tracker),
request_items,
full_prompt_items,
previous_response_id,
store_response,
prepared_prompt,
cancellation_token.child_token(),
)
.await
@@ -1914,10 +1931,7 @@ async fn run_turn(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
turn_diff_tracker: SharedTurnDiffTracker,
mut request_items: Vec<ResponseItem>,
mut full_prompt_items: Vec<ResponseItem>,
previous_response_id: Option<String>,
store_response: bool,
mut prepared_prompt: PreparedPrompt,
cancellation_token: CancellationToken,
) -> CodexResult<TurnRunResult> {
let mcp_tools = sess.services.mcp_connection_manager.list_all_tools();
@@ -1929,11 +1943,17 @@ async fn run_turn(
let tool_specs = router.specs();
let (tools_json, has_freeform_apply_patch) =
crate::tools::spec::tools_metadata_for_prompt(&tool_specs)?;
crate::conversation_history::format_prompt_items(&mut request_items, has_freeform_apply_patch);
crate::conversation_history::format_prompt_items(
&mut full_prompt_items,
&mut prepared_prompt.prompt.input,
has_freeform_apply_patch,
);
crate::conversation_history::format_prompt_items(
&mut prepared_prompt.full_prompt_items,
has_freeform_apply_patch,
);
let mut prompt = prepared_prompt.prompt;
let full_prompt_items = prepared_prompt.full_prompt_items;
let apply_patch_present = tool_specs.iter().any(|spec| spec.name() == "apply_patch");
@@ -1949,16 +1969,10 @@ async fn run_turn(
.get_model_family()
.supports_parallel_tool_calls;
let parallel_tool_calls = model_supports_parallel;
let prompt = Prompt {
instructions: instructions.clone(),
input: request_items,
tools: tools_json,
parallel_tool_calls,
output_schema: turn_context.final_output_json_schema.clone(),
store_response,
previous_response_id: previous_response_id.clone(),
..Default::default()
};
prompt.instructions = instructions.clone();
prompt.tools = tools_json;
prompt.parallel_tool_calls = parallel_tool_calls;
prompt.output_schema = turn_context.final_output_json_schema.clone();
let payload = StreamPayload { prompt };
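Note: prepare_prompt implements Responses-API chaining: when a previous response id exists, it compares the cached prompt items against the new full history and resends only the unseen suffix. A toy sketch of the prefix computation; common_prefix_len is assumed to be pairwise equality, and the string items are illustrative:

// Assumed shape of codex.rs's common_prefix_len: the number of leading
// items two slices have in common.
fn common_prefix_len<T: PartialEq>(a: &[T], b: &[T]) -> usize {
    a.iter().zip(b.iter()).take_while(|(x, y)| x == y).count()
}

fn main() {
    let last_prompt_items = ["system", "user: hi", "assistant: hello"];
    let full_prompt_items = ["system", "user: hi", "assistant: hello", "user: bye"];

    let prefix = common_prefix_len(&last_prompt_items, &full_prompt_items);
    // With a previous_response_id in hand, only the suffix is resent.
    let request_items = &full_prompt_items[prefix..];
    assert_eq!(request_items, ["user: bye"]);
    // A zero-length prefix with non-empty cached items resets the chain instead.
}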

View File

@@ -1,6 +1,4 @@
use crate::ModelProviderInfo;
use crate::auth::AuthCredentialsStoreMode;
use crate::built_in_model_providers;
use crate::config::types::DEFAULT_OTEL_ENVIRONMENT;
use crate::config::types::History;
use crate::config::types::McpServerConfig;
@@ -32,6 +30,8 @@ use crate::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use crate::project_doc::LOCAL_PROJECT_DOC_FILENAME;
use crate::protocol::AskForApproval;
use crate::protocol::SandboxPolicy;
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_app_server_protocol::Tools;
use codex_app_server_protocol::UserSavedConfig;
use codex_protocol::config_types::ForcedLoginMethod;
@@ -2712,7 +2712,7 @@ model_verbosity = "high"
name: "OpenAI using Chat Completions".to_string(),
base_url: Some("https://api.openai.com/v1".to_string()),
env_key: Some("OPENAI_API_KEY".to_string()),
wire_api: crate::WireApi::Chat,
wire_api: codex_api_client::WireApi::Chat,
env_key_instructions: None,
experimental_bearer_token: None,
query_params: None,

View File

@@ -18,9 +18,11 @@ mod command_safety;
pub mod config;
pub mod config_loader;
mod conversation_history;
mod conversation_manager;
pub mod custom_prompts;
mod environment_context;
pub mod error;
mod event_mapping;
pub mod exec;
pub mod exec_env;
pub mod features;
@@ -33,19 +35,12 @@ mod mcp_tool_call;
mod message_history;
pub mod parse_command;
mod response_processing;
pub mod review_format;
pub mod sandboxing;
pub mod token_data;
mod truncate;
mod unified_exec;
mod user_instructions;
pub use codex_api_client::BUILT_IN_OSS_MODEL_PROVIDER_ID;
pub use codex_api_client::ModelProviderInfo;
pub use codex_api_client::WireApi;
pub use codex_api_client::built_in_model_providers;
pub use codex_api_client::create_oss_provider_with_base_url;
mod conversation_manager;
mod event_mapping;
pub mod review_format;
pub use codex_protocol::protocol::InitialHistory;
pub use conversation_manager::ConversationManager;
pub use conversation_manager::NewConversation;

View File

@@ -5,7 +5,6 @@ use std::time::Duration;
use std::time::Instant;
use crate::AuthManager;
use crate::ModelProviderInfo;
use crate::client::ModelClient;
use crate::client::StreamPayload;
use crate::client_common::Prompt;
@@ -13,6 +12,7 @@ use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::protocol::SandboxPolicy;
use askama::Template;
use codex_api_client::ModelProviderInfo;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::models::ContentItem;

View File

@@ -1,15 +1,15 @@
use std::sync::Arc;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_app_server_protocol::AuthMode;
use codex_core::ContentItem;
use codex_core::LocalShellAction;
use codex_core::LocalShellExecAction;
use codex_core::LocalShellStatus;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;

View File

@@ -2,14 +2,14 @@ use assert_matches::assert_matches;
use std::sync::Arc;
use tracing_test::traced_test;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_app_server_protocol::AuthMode;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::spawn::CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;

View File

@@ -9,6 +9,7 @@ path = "lib.rs"
[dependencies]
anyhow = { workspace = true }
assert_cmd = { workspace = true }
codex-api-client = { workspace = true }
codex-core = { workspace = true }
codex-protocol = { workspace = true }
notify = { workspace = true }

View File

@@ -4,11 +4,11 @@ use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Result;
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::features::Feature;
use codex_core::protocol::AskForApproval;

View File

@@ -1,13 +1,13 @@
use std::sync::Arc;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_app_server_protocol::AuthMode;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::protocol::SessionSource;

View File

@@ -1,3 +1,6 @@
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_api_client::built_in_model_providers;
use codex_app_server_protocol::AuthMode;
use codex_core::CodexAuth;
use codex_core::ContentItem;
@@ -6,14 +9,11 @@ use codex_core::LocalShellAction;
use codex_core::LocalShellExecAction;
use codex_core::LocalShellStatus;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::Prompt;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::auth::AuthCredentialsStoreMode;
use codex_core::built_in_model_providers;
use codex_core::error::CodexErr;
use codex_core::features::Feature;
use codex_core::model_family::find_family_for_model;

View File

@@ -1,8 +1,8 @@
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;

View File

@@ -9,12 +9,12 @@
use super::compact::FIRST_REPLY;
use super::compact::SUMMARY_TEXT;
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::codex::compact::SUMMARIZATION_PROMPT;
use codex_core::config::Config;
use codex_core::config::OPENAI_DEFAULT_MODEL;

View File

@@ -1,8 +1,8 @@
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::parse_turn_item;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;

View File

@@ -1,9 +1,9 @@
#![allow(clippy::unwrap_used)]
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::features::Feature;
use codex_core::model_family::find_family_for_model;
use codex_core::protocol::EventMsg;

View File

@@ -1,9 +1,9 @@
#![allow(clippy::unwrap_used)]
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::built_in_model_providers;
use codex_core::config::OPENAI_DEFAULT_MODEL;
use codex_core::features::Feature;
use codex_core::model_family::find_family_for_model;

View File

@@ -1,11 +1,11 @@
use codex_api_client::ModelProviderInfo;
use codex_api_client::built_in_model_providers;
use codex_core::CodexAuth;
use codex_core::CodexConversation;
use codex_core::ContentItem;
use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::REVIEW_PROMPT;
use codex_core::ResponseItem;
use codex_core::built_in_model_providers;
use codex_core::config::Config;
use codex_core::protocol::ENVIRONMENT_CONTEXT_OPEN_TAG;
use codex_core::protocol::EventMsg;

View File

@@ -422,7 +422,7 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
let fixture = test_codex()
.with_config(move |config| {
config.model_provider.wire_api = codex_core::WireApi::Chat;
config.model_provider.wire_api = codex_api_client::WireApi::Chat;
config.features.enable(Feature::RmcpClient);
config.mcp_servers.insert(
server_name.to_string(),

View File

@@ -1,7 +1,7 @@
use std::time::Duration;
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::user_input::UserInput;

View File

@@ -3,8 +3,8 @@
use std::time::Duration;
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
use codex_protocol::user_input::UserInput;

View File

@@ -24,6 +24,7 @@ codex-common = { workspace = true, features = [
"sandbox_summary",
] }
codex-core = { workspace = true }
codex-api-client = { workspace = true }
codex-ollama = { workspace = true }
codex-protocol = { workspace = true }
mcp-types = { workspace = true }

View File

@@ -11,8 +11,8 @@ pub mod event_processor_with_jsonl_output;
pub mod exec_events;
pub use cli::Cli;
use codex_api_client::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::AuthManager;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::ConversationManager;
use codex_core::NewConversation;
use codex_core::auth::enforce_login_restrictions;

View File

@@ -13,6 +13,7 @@ workspace = true
[dependencies]
async-stream = { workspace = true }
bytes = { workspace = true }
codex-api-client = { workspace = true }
codex-core = { workspace = true }
futures = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }

View File

@@ -10,9 +10,9 @@ use crate::pull::PullEvent;
use crate::pull::PullProgressReporter;
use crate::url::base_url_to_host_root;
use crate::url::is_openai_compatible_base_url;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::ModelProviderInfo;
use codex_core::WireApi;
use codex_api_client::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_api_client::ModelProviderInfo;
use codex_api_client::WireApi;
use codex_core::config::Config;
const OLLAMA_CONNECTION_ERROR: &str = "No running Ollama server detected. Start it with: `ollama serve` (after installing). Install instructions: https://github.com/ollama/ollama?tab=readme-ov-file#ollama";
@@ -47,7 +47,7 @@ impl OllamaClient {
#[cfg(test)]
async fn try_from_provider_with_base_url(base_url: &str) -> io::Result<Self> {
let provider = codex_core::create_oss_provider_with_base_url(base_url);
let provider = codex_api_client::create_oss_provider_with_base_url(base_url);
Self::try_from_provider(&provider).await
}

View File

@@ -34,6 +34,7 @@ codex-common = { workspace = true, features = [
"sandbox_summary",
] }
codex-core = { workspace = true }
codex-api-client = { workspace = true }
codex-file-search = { workspace = true }
codex-login = { workspace = true }
codex-ollama = { workspace = true }

View File

@@ -6,9 +6,9 @@
use additional_dirs::add_dir_warning_message;
use app::App;
pub use app::AppExitInfo;
use codex_api_client::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_app_server_protocol::AuthMode;
use codex_core::AuthManager;
use codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID;
use codex_core::CodexAuth;
use codex_core::INTERACTIVE_SESSION_SOURCES;
use codex_core::RolloutRecorder;