Mirror of https://github.com/openai/codex.git (synced 2026-02-02 06:57:03 +00:00)

Comparing `remove/doc` ... `async-star` (19 commits)

| SHA1 |
|---|
| f26ebccea6 |
| 446cc27c95 |
| 9f436b1a26 |
| 4775338ca2 |
| 94546a492f |
| e516461241 |
| 997a8363ba |
| e2fe972e74 |
| bd90dd2349 |
| a45d2fa413 |
| 3098b49a88 |
| 49490e4ca3 |
| 958d106983 |
| d0516eac5d |
| bed4d268c5 |
| 3e5015f51c |
| 9cc4748524 |
| a010965005 |
| cc55cbb357 |
@@ -1300,7 +1300,7 @@ impl CodexMessageProcessor {
         } = conversation_id;
         let response = NewConversationResponse {
             conversation_id,
-            model: session_configured.model,
+            model: session_configured.model_family.slug,
             reasoning_effort: session_configured.reasoning_effort,
             rollout_path: session_configured.rollout_path,
         };
@@ -1374,7 +1374,7 @@ impl CodexMessageProcessor {
         };
 
         let SessionConfiguredEvent {
-            model,
+            model_family,
             model_provider_id,
             cwd,
             approval_policy,
@@ -1383,7 +1383,7 @@ impl CodexMessageProcessor {
         } = session_configured;
         let response = ThreadStartResponse {
             thread: thread.clone(),
-            model,
+            model: model_family.slug,
             model_provider: model_provider_id,
             cwd,
             approval_policy: approval_policy.into(),
@@ -1717,7 +1717,7 @@ impl CodexMessageProcessor {
 
         let response = ThreadResumeResponse {
             thread,
-            model: session_configured.model,
+            model: session_configured.model_family.slug,
             model_provider: session_configured.model_provider_id,
             cwd: session_configured.cwd,
             approval_policy: session_configured.approval_policy.into(),
@@ -2330,7 +2330,7 @@ impl CodexMessageProcessor {
             .send_server_notification(ServerNotification::SessionConfigured(
                 SessionConfiguredNotification {
                     session_id: session_configured.session_id,
-                    model: session_configured.model.clone(),
+                    model: session_configured.model_family.slug.clone(),
                     reasoning_effort: session_configured.reasoning_effort,
                     history_log_id: session_configured.history_log_id,
                     history_entry_count: session_configured.history_entry_count,
@@ -2346,7 +2346,7 @@ impl CodexMessageProcessor {
         // Reply with conversation id + model and initial messages (when present)
         let response = ResumeConversationResponse {
             conversation_id,
-            model: session_configured.model.clone(),
+            model: session_configured.model_family.slug.clone(),
             initial_messages,
             rollout_path: session_configured.rollout_path.clone(),
         };
@@ -49,9 +49,9 @@ use crate::features::FEATURES;
 use crate::flags::CODEX_RS_SSE_FIXTURE;
 use crate::model_provider_info::ModelProviderInfo;
 use crate::model_provider_info::WireApi;
-use crate::models_manager::model_family::ModelFamily;
 use crate::tools::spec::create_tools_json_for_chat_completions_api;
 use crate::tools::spec::create_tools_json_for_responses_api;
+use codex_protocol::openai_models::ModelFamily;
 
 #[derive(Debug, Clone)]
 pub struct ModelClient {
@@ -1,9 +1,9 @@
 use crate::client_common::tools::ToolSpec;
 use crate::error::Result;
-use crate::models_manager::model_family::ModelFamily;
 pub use codex_api::common::ResponseEvent;
 use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
 use codex_protocol::models::ResponseItem;
+use codex_protocol::openai_models::ModelFamily;
 use futures::Stream;
 use serde::Deserialize;
 use serde_json::Value;
@@ -17,14 +17,12 @@ use crate::exec_policy::ExecPolicyManager;
 use crate::features::Feature;
 use crate::features::Features;
 use crate::models_manager::manager::ModelsManager;
-use crate::models_manager::model_family::ModelFamily;
 use crate::parse_command::parse_command;
 use crate::parse_turn_item;
 use crate::stream_events_utils::HandleOutputCtx;
 use crate::stream_events_utils::handle_non_tool_response_item;
 use crate::stream_events_utils::handle_output_item_done;
 use crate::terminal;
-use crate::truncate::TruncationPolicy;
 use crate::user_notification::UserNotifier;
 use crate::util::error_or_panic;
 use async_channel::Receiver;
@@ -32,6 +30,8 @@ use async_channel::Sender;
 use codex_protocol::ConversationId;
 use codex_protocol::approvals::ExecPolicyAmendment;
 use codex_protocol::items::TurnItem;
+use codex_protocol::openai_models::ModelFamily;
+use codex_protocol::openai_models::TruncationPolicy;
 use codex_protocol::protocol::FileChange;
 use codex_protocol::protocol::HasLegacyEvent;
 use codex_protocol::protocol::ItemCompletedEvent;
@@ -527,7 +527,7 @@ impl Session {
             final_output_json_schema: None,
             codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(),
             tool_call_gate: Arc::new(ReadinessFlag::new()),
-            truncation_policy: TruncationPolicy::new(
+            truncation_policy: crate::truncate::new_truncation_policy(
                 per_turn_config.as_ref(),
                 model_family.truncation_policy,
             ),
@@ -681,11 +681,14 @@
         // Dispatch the SessionConfiguredEvent first and then report any errors.
         // If resuming, include converted initial messages in the payload so UIs can render them immediately.
         let initial_messages = initial_history.get_event_msgs();
+        let session_model_family = models_manager
+            .construct_model_family(session_configuration.model.as_str(), config.as_ref())
+            .await;
         let events = std::iter::once(Event {
             id: INITIAL_SUBMIT_ID.to_owned(),
             msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                 session_id: conversation_id,
-                model: session_configuration.model.clone(),
+                model_family: session_model_family,
                 model_provider_id: config.model_provider_id.clone(),
                 approval_policy: session_configuration.approval_policy.value(),
                 sandbox_policy: session_configuration.sandbox_policy.get().clone(),
@@ -2144,7 +2147,10 @@ async fn spawn_review_thread(
         final_output_json_schema: None,
         codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(),
         tool_call_gate: Arc::new(ReadinessFlag::new()),
-        truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy),
+        truncation_policy: crate::truncate::new_truncation_policy(
+            &per_turn_config,
+            model_family.truncation_policy,
+        ),
     };
 
     // Seed the child task with the review prompt as the initial user message.
@@ -15,7 +15,6 @@ use crate::protocol::EventMsg;
 use crate::protocol::TaskStartedEvent;
 use crate::protocol::TurnContextItem;
 use crate::protocol::WarningEvent;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::approx_token_count;
 use crate::truncate::truncate_text;
 use crate::util::backoff;
@@ -23,6 +22,7 @@ use codex_protocol::items::TurnItem;
 use codex_protocol::models::ContentItem;
 use codex_protocol::models::ResponseInputItem;
 use codex_protocol::models::ResponseItem;
+use codex_protocol::openai_models::TruncationPolicy;
 use codex_protocol::protocol::RolloutItem;
 use codex_protocol::user_input::UserInput;
 use futures::prelude::*;
@@ -1,6 +1,5 @@
 use crate::codex::TurnContext;
 use crate::context_manager::normalize;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::approx_token_count;
 use crate::truncate::approx_tokens_from_byte_count;
 use crate::truncate::truncate_function_output_items_with_policy;
@@ -9,6 +8,7 @@ use codex_protocol::models::ContentItem;
 use codex_protocol::models::FunctionCallOutputContentItem;
 use codex_protocol::models::FunctionCallOutputPayload;
 use codex_protocol::models::ResponseItem;
+use codex_protocol::openai_models::TruncationPolicy;
 use codex_protocol::protocol::TokenUsage;
 use codex_protocol::protocol::TokenUsageInfo;
 use std::ops::Deref;
@@ -225,7 +225,7 @@ impl ContextManager {
     }
 
     fn process_item(&self, item: &ResponseItem, policy: TruncationPolicy) -> ResponseItem {
-        let policy_with_serialization_budget = policy.mul(1.2);
+        let policy_with_serialization_budget = policy * 1.2;
         match item {
             ResponseItem::FunctionCallOutput { call_id, output } => {
                 let truncated =
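
In the `process_item` hunk above, `policy.mul(1.2)` becomes `policy * 1.2`, which assumes the protocol-level `TruncationPolicy` now implements `std::ops::Mul<f64>`. A minimal sketch of such an impl, mirroring the round-up semantics of the old `mul` helper removed later in this diff (illustrative only; the actual impl lives in `codex_protocol::openai_models` and is not shown here):

```rust
use std::ops::Mul;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum TruncationPolicy {
    Bytes(usize),
    Tokens(usize),
}

impl Mul<f64> for TruncationPolicy {
    type Output = TruncationPolicy;

    // Scale the underlying budget, rounding up to avoid under-budgeting,
    // matching the behavior of the removed `TruncationPolicy::mul` helper.
    fn mul(self, multiplier: f64) -> TruncationPolicy {
        match self {
            TruncationPolicy::Bytes(bytes) => {
                TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
            }
            TruncationPolicy::Tokens(tokens) => {
                TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
            }
        }
    }
}
```
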
@@ -1,6 +1,5 @@
 use super::*;
 use crate::truncate;
-use crate::truncate::TruncationPolicy;
 use codex_git::GhostCommit;
 use codex_protocol::models::ContentItem;
 use codex_protocol::models::FunctionCallOutputPayload;
@@ -9,6 +8,7 @@ use codex_protocol::models::LocalShellExecAction;
 use codex_protocol::models::LocalShellStatus;
 use codex_protocol::models::ReasoningItemContent;
 use codex_protocol::models::ReasoningItemReasoningSummary;
+use codex_protocol::openai_models::TruncationPolicy;
 use pretty_assertions::assert_eq;
 use regex_lite::Regex;
@@ -1,7 +1,6 @@
 use crate::exec::ExecToolCallOutput;
 use crate::token_data::KnownPlan;
 use crate::token_data::PlanType;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::truncate_text;
 use chrono::DateTime;
 use chrono::Datelike;
@@ -9,6 +8,7 @@ use chrono::Local;
 use chrono::Utc;
 use codex_async_utils::CancelErr;
 use codex_protocol::ConversationId;
+use codex_protocol::openai_models::TruncationPolicy;
 use codex_protocol::protocol::CodexErrorInfo;
 use codex_protocol::protocol::ErrorEvent;
 use codex_protocol::protocol::RateLimitSnapshot;
@@ -10,6 +10,7 @@ use std::collections::HashSet;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
+use tokio::sync::Mutex;
 use tokio::sync::RwLock;
 use tokio::sync::TryLockError;
 use tracing::error;
@@ -24,11 +25,12 @@ use crate::default_client::build_reqwest_client;
 use crate::error::Result as CoreResult;
 use crate::features::Feature;
 use crate::model_provider_info::ModelProviderInfo;
-use crate::models_manager::model_family::ModelFamily;
 use crate::models_manager::model_presets::builtin_model_presets;
+use codex_protocol::openai_models::ModelFamily;
 
 const MODEL_CACHE_FILE: &str = "models_cache.json";
 const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300);
+const MODELS_REFRESH_TIMEOUT: Duration = Duration::from_secs(5);
 const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max";
 const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "gpt-5.2-codex";
 const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced";
@@ -39,6 +41,7 @@ pub struct ModelsManager {
     // todo(aibrahim) merge available_models and model family creation into one struct
     local_models: Vec<ModelPreset>,
     remote_models: RwLock<Vec<ModelInfo>>,
+    refresh_lock: Mutex<()>,
     auth_manager: Arc<AuthManager>,
     etag: RwLock<Option<String>>,
     codex_home: PathBuf,
@@ -53,6 +56,7 @@ impl ModelsManager {
         Self {
             local_models: builtin_model_presets(auth_manager.get_auth_mode()),
             remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
+            refresh_lock: Mutex::new(()),
             auth_manager,
             etag: RwLock::new(None),
             codex_home,
@@ -68,6 +72,7 @@ impl ModelsManager {
         Self {
             local_models: builtin_model_presets(auth_manager.get_auth_mode()),
             remote_models: RwLock::new(Self::load_remote_models_from_file().unwrap_or_default()),
+            refresh_lock: Mutex::new(()),
             auth_manager,
             etag: RwLock::new(None),
             codex_home,
@@ -83,6 +88,11 @@ impl ModelsManager {
         {
             return Ok(());
         }
 
+        // Prevent duplicate `/models` refreshes when multiple callers try to refresh
+        // concurrently during startup (or when multiple features request models).
+        let _guard = self.refresh_lock.lock().await;
+
         if self.try_load_cache().await {
             return Ok(());
         }
@@ -94,10 +104,23 @@ impl ModelsManager {
        let client = ModelsClient::new(transport, api_provider, api_auth);
 
         let client_version = format_client_version_to_whole();
-        let ModelsResponse { models, etag } = client
-            .list_models(&client_version, HeaderMap::new())
-            .await
-            .map_err(map_api_error)?;
+        let response = tokio::time::timeout(
+            MODELS_REFRESH_TIMEOUT,
+            client.list_models(&client_version, HeaderMap::new()),
+        )
+        .await;
+
+        let ModelsResponse { models, etag } = match response {
+            Ok(response) => response.map_err(map_api_error)?,
+            Err(_) => {
+                error!(
+                    "timed out refreshing /models after {}s",
+                    MODELS_REFRESH_TIMEOUT.as_secs()
+                );
+                // Leave `remote_models` unchanged so the preloaded fallback remains available.
+                return Ok(());
+            }
+        };
 
         let etag = (!etag.is_empty()).then_some(etag);
 
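
The refresh change above combines three ideas: a `Mutex` guard so only one caller hits `/models` at a time, a `tokio::time::timeout` bound on the network call, and a silent fall back to the previously cached models on timeout. A self-contained sketch of the same shape (hypothetical names; this is not the actual `ModelsManager` code):

```rust
use std::time::Duration;
use tokio::sync::Mutex;

// Hypothetical cache refresher illustrating the guard + timeout + fallback
// pattern used by the ModelsManager change above.
struct Cache {
    refresh_lock: Mutex<()>,
}

impl Cache {
    async fn refresh<T, F>(&self, fetch: F, timeout: Duration) -> Option<T>
    where
        F: std::future::Future<Output = Result<T, String>>,
    {
        // Serialize refreshes: concurrent callers wait instead of re-fetching.
        let _guard = self.refresh_lock.lock().await;
        match tokio::time::timeout(timeout, fetch).await {
            Ok(Ok(value)) => Some(value),
            // On fetch error or timeout, return None so the caller keeps
            // whatever was cached before.
            Ok(Err(_)) | Err(_) => None,
        }
    }
}
```
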
@@ -126,9 +149,11 @@ impl ModelsManager {
 
     /// Look up the requested model family while applying remote metadata overrides.
     pub async fn construct_model_family(&self, model: &str, config: &Config) -> ModelFamily {
-        Self::find_family_for_model(model)
-            .with_remote_overrides(self.remote_models(config).await)
-            .with_config_overrides(config)
+        crate::models_manager::model_family::with_config_overrides(
+            Self::find_family_for_model(model)
+                .with_remote_overrides(self.remote_models(config).await),
+            config,
+        )
     }
 
     pub async fn get_model(&self, model: &Option<String>, config: &Config) -> String {
@@ -162,7 +187,10 @@
     #[cfg(any(test, feature = "test-support"))]
     /// Offline helper that builds a `ModelFamily` without consulting remote state.
     pub fn construct_model_family_offline(model: &str, config: &Config) -> ModelFamily {
-        Self::find_family_for_model(model).with_config_overrides(config)
+        crate::models_manager::model_family::with_config_overrides(
+            Self::find_family_for_model(model),
+            config,
+        )
     }
 
     /// Replace the cached remote models and rebuild the derived presets list.
@@ -1,12 +1,12 @@
 use codex_protocol::config_types::Verbosity;
 use codex_protocol::openai_models::ApplyPatchToolType;
 use codex_protocol::openai_models::ConfigShellToolType;
-use codex_protocol::openai_models::ModelInfo;
+use codex_protocol::openai_models::ModelFamily;
 use codex_protocol::openai_models::ReasoningEffort;
 use codex_protocol::openai_models::ReasoningSummaryFormat;
+use codex_protocol::openai_models::TruncationPolicy;
 
 use crate::config::Config;
-use crate::truncate::TruncationPolicy;
 
 /// The `instructions` field in the payload sent to a model should always start
 /// with this content.
@@ -19,147 +19,20 @@ const GPT_5_1_CODEX_MAX_INSTRUCTIONS: &str = include_str!("../../gpt-5.1-codex-m
|
||||
const GPT_5_2_CODEX_INSTRUCTIONS: &str = include_str!("../../gpt-5.2-codex_prompt.md");
|
||||
pub(crate) const CONTEXT_WINDOW_272K: i64 = 272_000;
|
||||
|
||||
/// A model family is a group of models that share certain characteristics.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct ModelFamily {
|
||||
/// The full model slug used to derive this model family, e.g.
|
||||
/// "gpt-4.1-2025-04-14".
|
||||
pub slug: String,
|
||||
|
||||
/// The model family name, e.g. "gpt-4.1". This string is used when deriving
|
||||
/// default metadata for the family, such as context windows.
|
||||
pub family: String,
|
||||
|
||||
/// True if the model needs additional instructions on how to use the
|
||||
/// "virtual" `apply_patch` CLI.
|
||||
pub needs_special_apply_patch_instructions: bool,
|
||||
|
||||
/// Maximum supported context window, if known.
|
||||
pub context_window: Option<i64>,
|
||||
|
||||
/// Token threshold for automatic compaction if config does not override it.
|
||||
auto_compact_token_limit: Option<i64>,
|
||||
|
||||
// Whether the `reasoning` field can be set when making a request to this
|
||||
// model family. Note it has `effort` and `summary` subfields (though
|
||||
// `summary` is optional).
|
||||
pub supports_reasoning_summaries: bool,
|
||||
|
||||
// The reasoning effort to use for this model family when none is explicitly chosen.
|
||||
pub default_reasoning_effort: Option<ReasoningEffort>,
|
||||
|
||||
// Define if we need a special handling of reasoning summary
|
||||
pub reasoning_summary_format: ReasoningSummaryFormat,
|
||||
|
||||
/// Whether this model supports parallel tool calls when using the
|
||||
/// Responses API.
|
||||
pub supports_parallel_tool_calls: bool,
|
||||
|
||||
/// Present if the model performs better when `apply_patch` is provided as
|
||||
/// a tool call instead of just a bash command
|
||||
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
|
||||
|
||||
// Instructions to use for querying the model
|
||||
pub base_instructions: String,
|
||||
|
||||
/// Names of beta tools that should be exposed to this model family.
|
||||
pub experimental_supported_tools: Vec<String>,
|
||||
|
||||
/// Percentage of the context window considered usable for inputs, after
|
||||
/// reserving headroom for system prompts, tool overhead, and model output.
|
||||
/// This is applied when computing the effective context window seen by
|
||||
/// consumers.
|
||||
pub effective_context_window_percent: i64,
|
||||
|
||||
/// If the model family supports setting the verbosity level when using Responses API.
|
||||
pub support_verbosity: bool,
|
||||
|
||||
// The default verbosity level for this model family when using Responses API.
|
||||
pub default_verbosity: Option<Verbosity>,
|
||||
|
||||
/// Preferred shell tool type for this model family when features do not override it.
|
||||
pub shell_type: ConfigShellToolType,
|
||||
|
||||
pub truncation_policy: TruncationPolicy,
|
||||
}
|
||||
|
||||
impl ModelFamily {
|
||||
pub(super) fn with_config_overrides(mut self, config: &Config) -> Self {
|
||||
if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
|
||||
self.supports_reasoning_summaries = supports_reasoning_summaries;
|
||||
}
|
||||
if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
|
||||
self.reasoning_summary_format = reasoning_summary_format.clone();
|
||||
}
|
||||
if let Some(context_window) = config.model_context_window {
|
||||
self.context_window = Some(context_window);
|
||||
}
|
||||
if let Some(auto_compact_token_limit) = config.model_auto_compact_token_limit {
|
||||
self.auto_compact_token_limit = Some(auto_compact_token_limit);
|
||||
}
|
||||
self
|
||||
pub fn with_config_overrides(mut mf: ModelFamily, config: &Config) -> ModelFamily {
|
||||
if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries {
|
||||
mf.supports_reasoning_summaries = supports_reasoning_summaries;
|
||||
}
|
||||
pub(super) fn with_remote_overrides(mut self, remote_models: Vec<ModelInfo>) -> Self {
|
||||
for model in remote_models {
|
||||
if model.slug == self.slug {
|
||||
self.apply_remote_overrides(model);
|
||||
}
|
||||
}
|
||||
self
|
||||
if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() {
|
||||
mf.reasoning_summary_format = reasoning_summary_format.clone();
|
||||
}
|
||||
|
||||
fn apply_remote_overrides(&mut self, model: ModelInfo) {
|
||||
let ModelInfo {
|
||||
slug: _,
|
||||
display_name: _,
|
||||
description: _,
|
||||
default_reasoning_level,
|
||||
supported_reasoning_levels: _,
|
||||
shell_type,
|
||||
visibility: _,
|
||||
supported_in_api: _,
|
||||
priority: _,
|
||||
upgrade: _,
|
||||
base_instructions,
|
||||
supports_reasoning_summaries,
|
||||
support_verbosity,
|
||||
default_verbosity,
|
||||
apply_patch_tool_type,
|
||||
truncation_policy,
|
||||
supports_parallel_tool_calls,
|
||||
context_window,
|
||||
reasoning_summary_format,
|
||||
experimental_supported_tools,
|
||||
} = model;
|
||||
|
||||
self.default_reasoning_effort = Some(default_reasoning_level);
|
||||
self.shell_type = shell_type;
|
||||
if let Some(base) = base_instructions {
|
||||
self.base_instructions = base;
|
||||
}
|
||||
self.supports_reasoning_summaries = supports_reasoning_summaries;
|
||||
self.support_verbosity = support_verbosity;
|
||||
self.default_verbosity = default_verbosity;
|
||||
self.apply_patch_tool_type = apply_patch_tool_type;
|
||||
self.truncation_policy = truncation_policy.into();
|
||||
self.supports_parallel_tool_calls = supports_parallel_tool_calls;
|
||||
self.context_window = context_window;
|
||||
self.reasoning_summary_format = reasoning_summary_format;
|
||||
self.experimental_supported_tools = experimental_supported_tools;
|
||||
if let Some(context_window) = config.model_context_window {
|
||||
mf.context_window = Some(context_window);
|
||||
}
|
||||
|
||||
pub fn auto_compact_token_limit(&self) -> Option<i64> {
|
||||
self.auto_compact_token_limit
|
||||
.or(self.context_window.map(Self::default_auto_compact_limit))
|
||||
}
|
||||
|
||||
const fn default_auto_compact_limit(context_window: i64) -> i64 {
|
||||
(context_window * 9) / 10
|
||||
}
|
||||
|
||||
pub fn get_model_slug(&self) -> &str {
|
||||
&self.slug
|
||||
if let Some(auto_compact_token_limit) = config.model_auto_compact_token_limit {
|
||||
mf.auto_compact_token_limit = Some(auto_compact_token_limit);
|
||||
}
|
||||
mf
|
||||
}
|
||||
|
||||
macro_rules! model_family {
|
||||
@@ -433,6 +306,7 @@ fn derive_default_model_family(model: &str) -> ModelFamily {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use codex_protocol::openai_models::ModelInfo;
     use codex_protocol::openai_models::ModelVisibility;
     use codex_protocol::openai_models::ReasoningEffortPreset;
     use codex_protocol::openai_models::TruncationPolicyConfig;
@@ -7,7 +7,7 @@ use crate::context_manager::ContextManager;
 use crate::protocol::RateLimitSnapshot;
 use crate::protocol::TokenUsage;
 use crate::protocol::TokenUsageInfo;
-use crate::truncate::TruncationPolicy;
+use codex_protocol::openai_models::TruncationPolicy;
 
 /// Persistent, session-scoped state previously stored directly on `Session`.
 pub(crate) struct SessionState {
@@ -10,9 +10,9 @@ pub mod sandboxing;
 pub mod spec;
 
 use crate::exec::ExecToolCallOutput;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::formatted_truncate_text;
 use crate::truncate::truncate_text;
+use codex_protocol::openai_models::TruncationPolicy;
 pub use router::ToolRouter;
 use serde::Serialize;
@@ -2,13 +2,13 @@ use crate::client_common::tools::ResponsesApiTool;
 use crate::client_common::tools::ToolSpec;
 use crate::features::Feature;
 use crate::features::Features;
-use crate::models_manager::model_family::ModelFamily;
 use crate::tools::handlers::PLAN_TOOL;
 use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool;
 use crate::tools::handlers::apply_patch::create_apply_patch_json_tool;
 use crate::tools::registry::ToolRegistryBuilder;
 use codex_protocol::openai_models::ApplyPatchToolType;
 use codex_protocol::openai_models::ConfigShellToolType;
+use codex_protocol::openai_models::ModelFamily;
 use serde::Deserialize;
 use serde::Serialize;
 use serde_json::Value as JsonValue;
@@ -4,99 +4,63 @@
 
 use crate::config::Config;
 use codex_protocol::models::FunctionCallOutputContentItem;
-use codex_protocol::openai_models::TruncationMode;
-use codex_protocol::openai_models::TruncationPolicyConfig;
-use codex_protocol::protocol::TruncationPolicy as ProtocolTruncationPolicy;
+use codex_protocol::openai_models::TruncationPolicy;
 
 const APPROX_BYTES_PER_TOKEN: usize = 4;
 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub enum TruncationPolicy {
-    Bytes(usize),
-    Tokens(usize),
-}
+/// Create a new `TruncationPolicy` with config overrides applied.
+pub fn new_truncation_policy(
+    config: &Config,
+    truncation_policy: TruncationPolicy,
+) -> TruncationPolicy {
+    let config_token_limit = config.tool_output_token_limit;
 
-impl From<TruncationPolicy> for ProtocolTruncationPolicy {
-    fn from(value: TruncationPolicy) -> Self {
-        match value {
-            TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes),
-            TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens),
+    match truncation_policy {
+        TruncationPolicy::Bytes(family_bytes) => {
+            if let Some(token_limit) = config_token_limit {
+                TruncationPolicy::Bytes(approx_bytes_for_tokens(token_limit))
+            } else {
+                TruncationPolicy::Bytes(family_bytes)
+            }
+        }
+        TruncationPolicy::Tokens(family_tokens) => {
+            if let Some(token_limit) = config_token_limit {
+                TruncationPolicy::Tokens(token_limit)
+            } else {
+                TruncationPolicy::Tokens(family_tokens)
+            }
         }
     }
 }
 
-impl From<TruncationPolicyConfig> for TruncationPolicy {
-    fn from(config: TruncationPolicyConfig) -> Self {
-        match config.mode {
-            TruncationMode::Bytes => Self::Bytes(config.limit as usize),
-            TruncationMode::Tokens => Self::Tokens(config.limit as usize),
+/// Returns a token budget derived from this policy.
+///
+/// - For `Tokens`, this is the explicit token limit.
+/// - For `Bytes`, this is an approximate token budget using the global
+///   bytes-per-token heuristic.
+pub fn token_budget(policy: &TruncationPolicy) -> usize {
+    match policy {
+        TruncationPolicy::Bytes(bytes) => {
+            usize::try_from(approx_tokens_from_byte_count(*bytes)).unwrap_or(usize::MAX)
         }
+        TruncationPolicy::Tokens(tokens) => *tokens,
     }
 }
 
-impl TruncationPolicy {
-    /// Scale the underlying budget by `multiplier`, rounding up to avoid under-budgeting.
-    pub fn mul(self, multiplier: f64) -> Self {
-        match self {
-            TruncationPolicy::Bytes(bytes) => {
-                TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
-            }
-            TruncationPolicy::Tokens(tokens) => {
-                TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
-            }
-        }
-    }
-
-    pub fn new(config: &Config, truncation_policy: TruncationPolicy) -> Self {
-        let config_token_limit = config.tool_output_token_limit;
-
-        match truncation_policy {
-            TruncationPolicy::Bytes(family_bytes) => {
-                if let Some(token_limit) = config_token_limit {
-                    Self::Bytes(approx_bytes_for_tokens(token_limit))
-                } else {
-                    Self::Bytes(family_bytes)
-                }
-            }
-            TruncationPolicy::Tokens(family_tokens) => {
-                if let Some(token_limit) = config_token_limit {
-                    Self::Tokens(token_limit)
-                } else {
-                    Self::Tokens(family_tokens)
-                }
-            }
-        }
-    }
-
-    /// Returns a token budget derived from this policy.
-    ///
-    /// - For `Tokens`, this is the explicit token limit.
-    /// - For `Bytes`, this is an approximate token budget using the global
-    ///   bytes-per-token heuristic.
-    pub fn token_budget(&self) -> usize {
-        match self {
-            TruncationPolicy::Bytes(bytes) => {
-                usize::try_from(approx_tokens_from_byte_count(*bytes)).unwrap_or(usize::MAX)
-            }
-            TruncationPolicy::Tokens(tokens) => *tokens,
-        }
-    }
-
-    /// Returns a byte budget derived from this policy.
-    ///
-    /// - For `Bytes`, this is the explicit byte limit.
-    /// - For `Tokens`, this is an approximate byte budget using the global
-    ///   bytes-per-token heuristic.
-    pub fn byte_budget(&self) -> usize {
-        match self {
-            TruncationPolicy::Bytes(bytes) => *bytes,
-            TruncationPolicy::Tokens(tokens) => approx_bytes_for_tokens(*tokens),
-        }
+/// Returns a byte budget derived from this policy.
+///
+/// - For `Bytes`, this is the explicit byte limit.
+/// - For `Tokens`, this is an approximate byte budget using the global
+///   bytes-per-token heuristic.
+pub fn byte_budget(policy: &TruncationPolicy) -> usize {
+    match policy {
+        TruncationPolicy::Bytes(bytes) => *bytes,
+        TruncationPolicy::Tokens(tokens) => approx_bytes_for_tokens(*tokens),
     }
 }
 
 pub(crate) fn formatted_truncate_text(content: &str, policy: TruncationPolicy) -> String {
-    if content.len() <= policy.byte_budget() {
+    if content.len() <= byte_budget(&policy) {
         return content.to_string();
     }
     let total_lines = content.lines().count();
@@ -122,8 +86,8 @@ pub(crate) fn truncate_function_output_items_with_policy(
 ) -> Vec<FunctionCallOutputContentItem> {
     let mut out: Vec<FunctionCallOutputContentItem> = Vec::with_capacity(items.len());
     let mut remaining_budget = match policy {
-        TruncationPolicy::Bytes(_) => policy.byte_budget(),
-        TruncationPolicy::Tokens(_) => policy.token_budget(),
+        TruncationPolicy::Bytes(_) => byte_budget(&policy),
+        TruncationPolicy::Tokens(_) => token_budget(&policy),
     };
     let mut omitted_text_items = 0usize;
 
@@ -182,7 +146,7 @@ fn truncate_with_token_budget(s: &str, policy: TruncationPolicy) -> (String, Opt
     if s.is_empty() {
         return (String::new(), None);
     }
-    let max_tokens = policy.token_budget();
+    let max_tokens = token_budget(&policy);
 
     let byte_len = s.len();
     if max_tokens > 0 && byte_len <= approx_bytes_for_tokens(max_tokens) {
@@ -208,7 +172,7 @@ fn truncate_with_byte_estimate(s: &str, policy: TruncationPolicy) -> String {
     }
 
     let total_chars = s.chars().count();
-    let max_bytes = policy.byte_budget();
+    let max_bytes = byte_budget(&policy);
 
     if max_bytes == 0 {
         // No budget to show content; just report that everything was truncated.
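
With the enum now living in `codex_protocol`, the budget helpers become free functions but keep the 4-bytes-per-token heuristic (`APPROX_BYTES_PER_TOKEN = 4`). A worked example of the arithmetic, assuming `approx_bytes_for_tokens` multiplies by that constant and `approx_tokens_from_byte_count` divides by it (both are plausible readings of the names, not shown in this diff):

```rust
// Worked example of the budget math above, under the stated assumptions.
let policy = TruncationPolicy::Tokens(2_048);
assert_eq!(byte_budget(&policy), 2_048 * 4); // 8_192 bytes
assert_eq!(token_budget(&policy), 2_048);

let policy = TruncationPolicy::Bytes(10_000);
assert_eq!(byte_budget(&policy), 10_000);
assert_eq!(token_budget(&policy), 10_000 / 4); // 2_500 tokens, assuming plain division
```
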
@@ -14,8 +14,8 @@ use crate::exec::ExecToolCallOutput;
 use crate::exec::SandboxType;
 use crate::exec::StreamOutput;
 use crate::exec::is_likely_sandbox_denied;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::formatted_truncate_text;
+use codex_protocol::openai_models::TruncationPolicy;
 use codex_utils_pty::ExecCommandSession;
 use codex_utils_pty::SpawnedPty;
@@ -22,9 +22,9 @@ use crate::tools::orchestrator::ToolOrchestrator;
 use crate::tools::runtimes::unified_exec::UnifiedExecRequest as UnifiedExecToolRequest;
 use crate::tools::runtimes::unified_exec::UnifiedExecRuntime;
 use crate::tools::sandboxing::ToolCtx;
-use crate::truncate::TruncationPolicy;
 use crate::truncate::approx_token_count;
 use crate::truncate::formatted_truncate_text;
+use codex_protocol::openai_models::TruncationPolicy;
 
 use super::CommandTranscript;
 use super::ExecCommandRequest;
@@ -250,7 +250,7 @@ impl TestCodex {
         approval_policy: AskForApproval,
         sandbox_policy: SandboxPolicy,
     ) -> Result<()> {
-        let session_model = self.session_configured.model.clone();
+        let session_model = self.session_configured.model_family.slug.clone();
         self.codex
             .submit(Op::UserTurn {
                 items: vec![UserInput::Text {
@@ -297,7 +297,7 @@ async fn apply_patch_cli_move_without_content_change_has_no_turn_diff(
     let call_id = "apply-move-no-change";
     mount_apply_patch(&harness, call_id, patch, "ok", model_output).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -883,7 +883,7 @@ async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<()> {
     ];
     mount_sse_sequence(harness.server(), bodies).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -960,7 +960,7 @@ async fn apply_patch_shell_command_failure_propagates_error_and_skips_diff() -> Result<()> {
     ];
     mount_sse_sequence(harness.server(), bodies).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -1107,7 +1107,7 @@ async fn apply_patch_emits_turn_diff_event_with_unified_diff(
     let patch = format!("*** Begin Patch\n*** Add File: {file}\n+hello\n*** End Patch\n");
     mount_apply_patch(&harness, call_id, patch.as_str(), "ok", model_output).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -1167,7 +1167,7 @@ async fn apply_patch_turn_diff_for_rename_with_content_change(
     let patch = "*** Begin Patch\n*** Update File: old.txt\n*** Move to: new.txt\n@@\n-old\n+new\n*** End Patch";
     mount_apply_patch(&harness, call_id, patch, "ok", model_output).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -1235,7 +1235,7 @@ async fn apply_patch_aggregates_diff_across_multiple_tool_calls() -> Result<()> {
     ]);
     mount_sse_sequence(harness.server(), vec![s1, s2, s3]).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -1303,7 +1303,7 @@ async fn apply_patch_aggregates_diff_preserves_success_after_failure() -> Result<()> {
     ];
     mount_sse_sequence(harness.server(), responses).await;
 
-    let model = test.session_configured.model.clone();
+    let model = test.session_configured.model_family.slug.clone();
     codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -486,7 +486,7 @@ async fn submit_turn(
     approval_policy: AskForApproval,
     sandbox_policy: SandboxPolicy,
 ) -> Result<()> {
-    let session_model = test.session_configured.model.clone();
+    let session_model = test.session_configured.model_family.slug.clone();
 
     test.codex
         .submit(Op::UserTurn {
@@ -103,7 +103,7 @@ async fn remote_compact_replaces_history_for_followups() -> Result<()> {
|
||||
let compact_body = compact_request.body_json();
|
||||
assert_eq!(
|
||||
compact_body.get("model").and_then(|v| v.as_str()),
|
||||
Some(harness.test().session_configured.model.as_str())
|
||||
Some(harness.test().session_configured.model_family.slug.as_str())
|
||||
);
|
||||
let compact_body_text = compact_body.to_string();
|
||||
assert!(
|
||||
|
||||
@@ -67,7 +67,7 @@ async fn execpolicy_blocks_shell_invocation() -> Result<()> {
|
||||
)
|
||||
.await;
|
||||
|
||||
let session_model = test.session_configured.model.clone();
|
||||
let session_model = test.session_configured.model_family.slug.clone();
|
||||
test.codex
|
||||
.submit(Op::UserTurn {
|
||||
items: vec![UserInput::Text {
|
||||
|
||||
@@ -606,7 +606,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> anyhow::Result<()> {
     let default_cwd = config.cwd.clone();
     let default_approval_policy = config.approval_policy.value();
     let default_sandbox_policy = config.sandbox_policy.get();
-    let default_model = session_configured.model;
+    let default_model = session_configured.model_family.slug;
     let default_effort = config.model_reasoning_effort;
     let default_summary = config.model_reasoning_summary;
 
@@ -696,7 +696,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Result<()> {
     let default_cwd = config.cwd.clone();
     let default_approval_policy = config.approval_policy.value();
     let default_sandbox_policy = config.sandbox_policy.get();
-    let default_model = session_configured.model;
+    let default_model = session_configured.model_family.slug;
     let default_effort = config.model_reasoning_effort;
     let default_summary = config.model_reasoning_summary;
@@ -101,7 +101,7 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
@@ -238,7 +238,7 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
@@ -433,7 +433,7 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
@@ -576,7 +576,7 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
@@ -724,7 +724,7 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
@@ -904,7 +904,7 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> {
|
||||
})
|
||||
.build(&server)
|
||||
.await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
|
||||
@@ -64,7 +64,7 @@ async fn run_snapshot_command(command: &str) -> Result<SnapshotRun> {
|
||||
let test = harness.test();
|
||||
let codex = test.codex.clone();
|
||||
let codex_home = test.home.path().to_path_buf();
|
||||
let session_model = test.session_configured.model.clone();
|
||||
let session_model = test.session_configured.model_family.slug.clone();
|
||||
let cwd = test.cwd_path().to_path_buf();
|
||||
|
||||
codex
|
||||
@@ -140,7 +140,7 @@ async fn run_shell_command_snapshot(command: &str) -> Result<SnapshotRun> {
|
||||
let test = harness.test();
|
||||
let codex = test.codex.clone();
|
||||
let codex_home = test.home.path().to_path_buf();
|
||||
let session_model = test.session_configured.model.clone();
|
||||
let session_model = test.session_configured.model_family.slug.clone();
|
||||
let cwd = test.cwd_path().to_path_buf();
|
||||
|
||||
codex
|
||||
@@ -279,7 +279,7 @@ async fn shell_command_snapshot_still_intercepts_apply_patch() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(harness.server(), responses).await;
|
||||
|
||||
let model = test.session_configured.model.clone();
|
||||
let model = test.session_configured.model_family.slug.clone();
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
items: vec![UserInput::Text {
|
||||
|
||||
@@ -63,7 +63,7 @@ async fn user_turn_includes_skill_instructions() -> Result<()> {
|
||||
)
|
||||
.await;
|
||||
|
||||
let session_model = test.session_configured.model.clone();
|
||||
let session_model = test.session_configured.model_family.slug.clone();
|
||||
test.codex
|
||||
.submit(Op::UserTurn {
|
||||
items: vec![
|
||||
|
||||
@@ -76,7 +76,7 @@ async fn shell_tool_executes_command_and_streams_output() -> anyhow::Result<()>
|
||||
]);
|
||||
let second_mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -142,7 +142,7 @@ async fn update_plan_tool_emits_plan_update_event() -> anyhow::Result<()> {
|
||||
]);
|
||||
let second_mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -218,7 +218,7 @@ async fn update_plan_tool_rejects_malformed_payload() -> anyhow::Result<()> {
|
||||
]);
|
||||
let second_mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -306,7 +306,7 @@ async fn apply_patch_tool_executes_and_emits_patch_events() -> anyhow::Result<()
|
||||
]);
|
||||
let second_mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -402,7 +402,7 @@ async fn apply_patch_reports_parse_diagnostics() -> anyhow::Result<()> {
|
||||
]);
|
||||
let second_mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
|
||||
@@ -32,7 +32,7 @@ use serde_json::json;
 use tokio::sync::oneshot;
 
 async fn run_turn(test: &TestCodex, prompt: &str) -> anyhow::Result<()> {
-    let session_model = test.session_configured.model.clone();
+    let session_model = test.session_configured.model_family.slug.clone();
 
     test.codex
         .submit(Op::UserTurn {
@@ -345,7 +345,7 @@ async fn shell_tools_start_before_response_completed_when_stream_delayed() -> anyhow::Result<()> {
     .build_with_streaming_server(&streaming_server)
     .await?;
 
-    let session_model = test.session_configured.model.clone();
+    let session_model = test.session_configured.model_family.slug.clone();
     test.codex
         .submit(Op::UserTurn {
             items: vec![UserInput::Text {
@@ -531,7 +531,7 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> {
|
||||
);
|
||||
});
|
||||
let fixture = builder.build(&server).await?;
|
||||
let session_model = fixture.session_configured.model.clone();
|
||||
let session_model = fixture.session_configured.model_family.slug.clone();
|
||||
|
||||
fixture
|
||||
.codex
|
||||
|
||||
@@ -194,7 +194,7 @@ async fn unified_exec_intercepts_apply_patch_exec_command() -> Result<()> {
|
||||
let test = harness.test();
|
||||
let codex = test.codex.clone();
|
||||
let cwd = test.cwd_path().to_path_buf();
|
||||
let session_model = test.session_configured.model.clone();
|
||||
let session_model = test.session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -320,7 +320,7 @@ async fn unified_exec_emits_exec_command_begin_event() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -395,7 +395,7 @@ async fn unified_exec_resolves_relative_workdir() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -473,7 +473,7 @@ async fn unified_exec_respects_workdir_override() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -563,7 +563,7 @@ async fn unified_exec_emits_exec_command_end_event() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -635,7 +635,7 @@ async fn unified_exec_emits_output_delta_for_exec_command() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -708,7 +708,7 @@ async fn unified_exec_full_lifecycle_with_background_end_event() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -834,7 +834,7 @@ async fn unified_exec_emits_terminal_interaction_for_write_stdin() -> Result<()>
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -967,7 +967,7 @@ async fn unified_exec_terminal_interaction_captures_delayed_output() -> Result<(
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1124,7 +1124,7 @@ async fn unified_exec_emits_one_begin_and_one_end_event() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1219,7 +1219,7 @@ async fn exec_command_reports_chunk_and_exit_metadata() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1323,7 +1323,7 @@ async fn unified_exec_respects_early_exit_notifications() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1448,7 +1448,7 @@ async fn write_stdin_returns_exit_metadata_and_clears_session() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1610,7 +1610,7 @@ async fn unified_exec_emits_end_event_when_session_dies_via_stdin() -> Result<()
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1684,7 +1684,7 @@ async fn unified_exec_closes_long_running_session_at_turn_end() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1802,7 +1802,7 @@ async fn unified_exec_reuses_session_via_stdin() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -1931,7 +1931,7 @@ PY
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2040,7 +2040,7 @@ async fn unified_exec_timeout_and_followup_poll() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2131,7 +2131,7 @@ PY
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2207,7 +2207,7 @@ async fn unified_exec_runs_under_sandbox() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2306,7 +2306,7 @@ async fn unified_exec_python_prompt_under_seatbelt() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2396,7 +2396,7 @@ async fn unified_exec_runs_on_all_platforms() -> Result<()> {
|
||||
];
|
||||
mount_sse_sequence(&server, responses).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -2524,7 +2524,7 @@ async fn unified_exec_prunes_exited_sessions_first() -> Result<()> {
|
||||
let response_mock =
|
||||
mount_sse_sequence(&server, vec![first_response, completion_response]).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
|
||||
@@ -72,7 +72,7 @@ async fn user_turn_with_local_image_attaches_image() -> anyhow::Result<()> {
|
||||
]);
|
||||
let mock = responses::mount_sse_once(&server, response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -163,7 +163,7 @@ async fn view_image_tool_attaches_local_image() -> anyhow::Result<()> {
|
||||
]);
|
||||
let mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -273,7 +273,7 @@ async fn view_image_tool_errors_when_path_is_directory() -> anyhow::Result<()> {
|
||||
]);
|
||||
let mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -345,7 +345,7 @@ async fn view_image_tool_placeholder_for_non_image_files() -> anyhow::Result<()>
|
||||
]);
|
||||
let mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -436,7 +436,7 @@ async fn view_image_tool_errors_when_file_missing() -> anyhow::Result<()> {
|
||||
]);
|
||||
let mock = responses::mount_sse_once(&server, second_response).await;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
@@ -517,7 +517,7 @@ async fn replaces_invalid_local_image_after_bad_request() -> anyhow::Result<()>
|
||||
let image = ImageBuffer::from_pixel(1024, 512, Rgba([10u8, 20, 30, 255]));
|
||||
image.save(&abs_path)?;
|
||||
|
||||
let session_model = session_configured.model.clone();
|
||||
let session_model = session_configured.model_family.slug.clone();
|
||||
|
||||
codex
|
||||
.submit(Op::UserTurn {
|
||||
|
||||
@@ -173,3 +173,17 @@ sequenceDiagram
     task2->>user: Event::TurnCompleted
     task2->>-user: Event::TaskCompleted
 ```
+
+## Key Events
+
+### SessionConfigured
+
+`Event::SessionConfigured` is emitted once after `Op::ConfigureSession`.
+
+Notable fields:
+- `session_id`: the backend-assigned session identifier (historically named `session_id` for
+  backwards compatibility).
+- `model_family`: model metadata for the model family selected for the session. This matches the
+  `/models` payload shape; the selected model slug is `model_family.slug`.
+- `approval_policy`, `sandbox_policy`, `cwd`: the effective execution and sandbox settings for the
+  session.
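
A minimal consumer sketch for the reshaped event (illustrative only; the field names follow this diff, everything else is assumed):

```rust
// Illustrative handler: read the selected model slug from the new
// `model_family` payload instead of the removed `model` string field.
fn handle_event(event: EventMsg) {
    if let EventMsg::SessionConfigured(configured) = event {
        println!("session id: {}", configured.session_id);
        println!("model: {}", configured.model_family.slug);
        println!("context window: {:?}", configured.model_family.context_window);
    }
}
```
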
@@ -140,8 +140,10 @@ impl EventProcessor for EventProcessorWithHumanOutput {
             VERSION
         );
 
-        let mut entries =
-            create_config_summary_entries(config, session_configured_event.model.as_str());
+        let mut entries = create_config_summary_entries(
+            config,
+            session_configured_event.model_family.slug.as_str(),
+        );
         entries.push((
             "session id",
             session_configured_event.session_id.to_string(),
@@ -494,7 +496,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
             EventMsg::SessionConfigured(session_configured_event) => {
                 let SessionConfiguredEvent {
                     session_id: conversation_id,
-                    model,
+                    model_family,
                     ..
                 } = session_configured_event;
 
@@ -505,7 +507,7 @@ impl EventProcessor for EventProcessorWithHumanOutput {
                     conversation_id.to_string().style(self.dimmed)
                 );
 
-                ts_msg!(self, "model: {}", model);
+                ts_msg!(self, "model: {}", model_family.slug);
                 eprintln!();
             }
             EventMsg::PlanUpdate(plan_update_event) => {
@@ -44,6 +44,11 @@ use codex_exec::exec_events::TurnFailedEvent;
use codex_exec::exec_events::TurnStartedEvent;
use codex_exec::exec_events::Usage;
use codex_exec::exec_events::WebSearchItem;
use codex_protocol::openai_models::ConfigShellToolType;
use codex_protocol::openai_models::ModelFamily;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::ReasoningSummaryFormat;
use codex_protocol::openai_models::TruncationPolicy;
use codex_protocol::plan_tool::PlanItemArg;
use codex_protocol::plan_tool::StepStatus;
use codex_protocol::plan_tool::UpdatePlanArgs;
@@ -76,7 +81,25 @@ fn session_configured_produces_thread_started_event() {
        "e1",
        EventMsg::SessionConfigured(SessionConfiguredEvent {
            session_id,
            model: "codex-mini-latest".to_string(),
            model_family: ModelFamily {
                slug: "codex-mini-latest".to_string(),
                family: "codex-mini-latest".to_string(),
                needs_special_apply_patch_instructions: false,
                context_window: None,
                auto_compact_token_limit: None,
                supports_reasoning_summaries: false,
                default_reasoning_effort: Some(ReasoningEffort::default()),
                reasoning_summary_format: ReasoningSummaryFormat::None,
                supports_parallel_tool_calls: false,
                apply_patch_tool_type: None,
                base_instructions: String::new(),
                experimental_supported_tools: Vec::new(),
                effective_context_window_percent: 95,
                support_verbosity: false,
                default_verbosity: None,
                shell_type: ConfigShellToolType::Default,
                truncation_policy: TruncationPolicy::Bytes(10_000),
            },
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,

@@ -239,13 +239,39 @@ mod tests {
    use codex_core::protocol::SandboxPolicy;
    use codex_core::protocol::SessionConfiguredEvent;
    use codex_protocol::ConversationId;
    use codex_protocol::openai_models::ConfigShellToolType;
    use codex_protocol::openai_models::ModelFamily;
    use codex_protocol::openai_models::ReasoningEffort;
    use codex_protocol::openai_models::ReasoningSummaryFormat;
    use codex_protocol::openai_models::TruncationPolicy;
    use pretty_assertions::assert_eq;
    use serde_json::json;
    use tempfile::NamedTempFile;

    use super::*;

    fn test_model_family(slug: &str) -> ModelFamily {
        ModelFamily {
            slug: slug.to_string(),
            family: slug.to_string(),
            needs_special_apply_patch_instructions: false,
            context_window: None,
            auto_compact_token_limit: None,
            supports_reasoning_summaries: false,
            default_reasoning_effort: Some(ReasoningEffort::default()),
            reasoning_summary_format: ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: String::new(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
            default_verbosity: None,
            shell_type: ConfigShellToolType::Default,
            truncation_policy: TruncationPolicy::Bytes(10_000),
        }
    }

    #[tokio::test]
    async fn test_send_event_as_notification() -> Result<()> {
        let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded_channel::<OutgoingMessage>();
@@ -257,7 +283,7 @@ mod tests {
            id: "1".to_string(),
            msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                session_id: conversation_id,
                model: "gpt-4o".to_string(),
                model_family: test_model_family("gpt-4o"),
                model_provider_id: "test-provider".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,
@@ -296,7 +322,7 @@ mod tests {
        let rollout_file = NamedTempFile::new()?;
        let session_configured_event = SessionConfiguredEvent {
            session_id: conversation_id,
            model: "gpt-4o".to_string(),
            model_family: test_model_family("gpt-4o"),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
@@ -332,7 +358,7 @@ mod tests {
            "msg": {
                "type": "session_configured",
                "session_id": session_configured_event.session_id,
                "model": "gpt-4o",
                "model_family": serde_json::to_value(&session_configured_event.model_family)?,
                "model_provider_id": "test-provider",
                "approval_policy": "never",
                "sandbox_policy": {

@@ -163,6 +163,199 @@ impl TruncationPolicyConfig {
    }
}

#[derive(Debug, Clone, Deserialize, Serialize, Copy, PartialEq, Eq, Hash, JsonSchema, TS)]
pub enum TruncationPolicy {
    Bytes(usize),
    Tokens(usize),
}

impl From<TruncationPolicyConfig> for TruncationPolicy {
    fn from(config: TruncationPolicyConfig) -> Self {
        match config.mode {
            TruncationMode::Bytes => Self::Bytes(config.limit as usize),
            TruncationMode::Tokens => Self::Tokens(config.limit as usize),
        }
    }
}

impl std::ops::Mul<f64> for TruncationPolicy {
    type Output = Self;

    /// Scale the underlying budget by `multiplier`, rounding up to avoid under-budgeting.
    fn mul(self, multiplier: f64) -> Self::Output {
        match self {
            TruncationPolicy::Bytes(bytes) => {
                TruncationPolicy::Bytes((bytes as f64 * multiplier).ceil() as usize)
            }
            TruncationPolicy::Tokens(tokens) => {
                TruncationPolicy::Tokens((tokens as f64 * multiplier).ceil() as usize)
            }
        }
    }
}

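For intuition, a small sketch of the rounding behavior (illustrative only, not part of the diff; assumes the `TruncationPolicy` enum above is in scope):

```rust
// ceil-based scaling never shrinks a budget below its exact fractional value.
let half = TruncationPolicy::Tokens(3) * 0.5;
assert_eq!(half, TruncationPolicy::Tokens(2)); // ceil(1.5) == 2

let grown = TruncationPolicy::Bytes(10_000) * 1.5;
assert_eq!(grown, TruncationPolicy::Bytes(15_000));
```
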
/// A model family is a group of models that share certain characteristics.
#[derive(Debug, Clone, Deserialize, Serialize, Hash, JsonSchema, TS)]
pub struct ModelFamily {
    /// The full model slug used to derive this model family, e.g.
    /// "gpt-4.1-2025-04-14".
    pub slug: String,

    /// The model family name, e.g. "gpt-4.1". This string is used when deriving
    /// default metadata for the family, such as context windows.
    pub family: String,

    /// True if the model needs additional instructions on how to use the
    /// "virtual" `apply_patch` CLI.
    pub needs_special_apply_patch_instructions: bool,

    /// Maximum supported context window, if known.
    pub context_window: Option<i64>,

    /// Token threshold for automatic compaction if config does not override it.
    pub auto_compact_token_limit: Option<i64>,

    // Whether the `reasoning` field can be set when making a request to this
    // model family. Note it has `effort` and `summary` subfields (though
    // `summary` is optional).
    pub supports_reasoning_summaries: bool,

    // The reasoning effort to use for this model family when none is explicitly chosen.
    pub default_reasoning_effort: Option<ReasoningEffort>,

    // Defines whether the reasoning summary needs special handling.
    pub reasoning_summary_format: ReasoningSummaryFormat,

    /// Whether this model supports parallel tool calls when using the
    /// Responses API.
    pub supports_parallel_tool_calls: bool,

    /// Present if the model performs better when `apply_patch` is provided as
    /// a tool call instead of just a bash command.
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,

    // Instructions to use for querying the model.
    pub base_instructions: String,

    /// Names of beta tools that should be exposed to this model family.
    pub experimental_supported_tools: Vec<String>,

    /// Percentage of the context window considered usable for inputs, after
    /// reserving headroom for system prompts, tool overhead, and model output.
    /// This is applied when computing the effective context window seen by
    /// consumers.
    pub effective_context_window_percent: i64,

    /// Whether the model family supports setting the verbosity level when using the Responses API.
    pub support_verbosity: bool,

    // The default verbosity level for this model family when using the Responses API.
    pub default_verbosity: Option<Verbosity>,

    /// Preferred shell tool type for this model family when features do not override it.
    pub shell_type: ConfigShellToolType,

    pub truncation_policy: TruncationPolicy,
}

impl ModelFamily {
    /// Convert a `ModelFamily` into the protocol's `ModelInfo` shape for inclusion in events.
    ///
    /// This intentionally omits fields that are not needed for session bootstrapping
    /// (e.g. `priority`, `visibility`, and `base_instructions`).
    pub fn to_session_configured_model_info(&self) -> ModelInfo {
        let default_reasoning_level = self.default_reasoning_effort.unwrap_or_default();
        let truncation_policy = match self.truncation_policy {
            TruncationPolicy::Bytes(limit) => TruncationPolicyConfig::bytes(limit as i64),
            TruncationPolicy::Tokens(limit) => TruncationPolicyConfig::tokens(limit as i64),
        };

        ModelInfo {
            slug: self.slug.clone(),
            display_name: self.slug.clone(),
            description: None,
            default_reasoning_level,
            supported_reasoning_levels: vec![ReasoningEffortPreset {
                effort: default_reasoning_level,
                description: default_reasoning_level.to_string(),
            }],
            shell_type: self.shell_type,
            visibility: ModelVisibility::None,
            supported_in_api: true,
            priority: 0,
            upgrade: None,
            base_instructions: None,
            supports_reasoning_summaries: self.supports_reasoning_summaries,
            support_verbosity: self.support_verbosity,
            default_verbosity: self.default_verbosity,
            apply_patch_tool_type: self.apply_patch_tool_type.clone(),
            truncation_policy,
            supports_parallel_tool_calls: self.supports_parallel_tool_calls,
            context_window: self.context_window,
            reasoning_summary_format: self.reasoning_summary_format.clone(),
            experimental_supported_tools: self.experimental_supported_tools.clone(),
        }
    }

    pub fn auto_compact_token_limit(&self) -> Option<i64> {
        self.auto_compact_token_limit
            .or(self.context_window.map(|cw| (cw * 9) / 10))
    }

    pub fn get_model_slug(&self) -> &str {
        &self.slug
    }

    pub fn with_remote_overrides(mut self, remote_models: Vec<ModelInfo>) -> Self {
        for model in remote_models {
            if model.slug == self.slug {
                self.apply_remote_overrides(model);
            }
        }
        self
    }

    fn apply_remote_overrides(&mut self, model: ModelInfo) {
        let ModelInfo {
            slug: _,
            display_name: _,
            description: _,
            default_reasoning_level,
            supported_reasoning_levels: _,
            shell_type,
            visibility: _,
            supported_in_api: _,
            priority: _,
            upgrade: _,
            base_instructions,
            supports_reasoning_summaries,
            support_verbosity,
            default_verbosity,
            apply_patch_tool_type,
            truncation_policy,
            supports_parallel_tool_calls,
            context_window,
            reasoning_summary_format,
            experimental_supported_tools,
        } = model;

        self.default_reasoning_effort = Some(default_reasoning_level);
        self.shell_type = shell_type;
        if let Some(base) = base_instructions {
            self.base_instructions = base;
        }
        self.supports_reasoning_summaries = supports_reasoning_summaries;
        self.support_verbosity = support_verbosity;
        self.default_verbosity = default_verbosity;
        self.apply_patch_tool_type = apply_patch_tool_type;
        self.truncation_policy = truncation_policy.into();
        self.supports_parallel_tool_calls = supports_parallel_tool_calls;
        self.context_window = context_window;
        self.reasoning_summary_format = reasoning_summary_format;
        self.experimental_supported_tools = experimental_supported_tools;
    }
}

/// Semantic version triple encoded as an array in JSON (e.g. [0, 62, 0]).
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct ClientVersion(pub i32, pub i32, pub i32);

@@ -19,6 +19,7 @@ use crate::message_history::HistoryEntry;
use crate::models::ContentItem;
use crate::models::ResponseItem;
use crate::num_format::format_with_separators;
use crate::openai_models::ModelFamily;
use crate::openai_models::ReasoningEffort as ReasoningEffortConfig;
use crate::parse_command::ParsedCommand;
use crate::plan_tool::UpdatePlanArgs;
@@ -1343,6 +1344,17 @@ pub enum TruncationPolicy {
    Tokens(usize),
}

// Allow core to pass model-family truncation policies (defined in openai_models)
// into the TurnContext payload (defined in protocol) via `.into()`.
impl From<crate::openai_models::TruncationPolicy> for TruncationPolicy {
    fn from(value: crate::openai_models::TruncationPolicy) -> Self {
        match value {
            crate::openai_models::TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes),
            crate::openai_models::TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens),
        }
    }
}

#[derive(Serialize, Deserialize, Clone, JsonSchema)]
pub struct RolloutLine {
    pub timestamp: String,
@@ -1770,8 +1782,8 @@ pub struct SessionConfiguredEvent {
    /// Name left as session_id instead of conversation_id for backwards compatibility.
    pub session_id: ConversationId,

    /// Tell the client what model is being queried.
    pub model: String,
    /// Model metadata for the model family that Codex selected for this session.
    pub model_family: ModelFamily,

    pub model_provider_id: String,

@@ -1929,11 +1941,30 @@ mod tests {
    fn serialize_event() -> Result<()> {
        let conversation_id = ConversationId::from_string("67e55044-10b1-426f-9247-bb680e5fe0c8")?;
        let rollout_file = NamedTempFile::new()?;
        let model_family = crate::openai_models::ModelFamily {
            slug: "codex-mini-latest".to_string(),
            family: "codex-mini-latest".to_string(),
            needs_special_apply_patch_instructions: false,
            context_window: None,
            auto_compact_token_limit: None,
            supports_reasoning_summaries: false,
            default_reasoning_effort: Some(crate::openai_models::ReasoningEffort::Medium),
            reasoning_summary_format: crate::openai_models::ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: "".to_string(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
            default_verbosity: None,
            shell_type: crate::openai_models::ConfigShellToolType::Default,
            truncation_policy: crate::openai_models::TruncationPolicy::Bytes(10_000),
        };
        let event = Event {
            id: "1234".to_string(),
            msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
                session_id: conversation_id,
                model: "codex-mini-latest".to_string(),
                model_family: model_family.clone(),
                model_provider_id: "openai".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,
@@ -1951,7 +1982,7 @@ mod tests {
            "msg": {
                "type": "session_configured",
                "session_id": "67e55044-10b1-426f-9247-bb680e5fe0c8",
                "model": "codex-mini-latest",
                "model_family": serde_json::to_value(model_family)?,
                "model_provider_id": "openai",
                "approval_policy": "never",
                "sandbox_policy": {

@@ -10,9 +10,6 @@ use crate::external_editor;
use crate::file_search::FileSearchManager;
use crate::history_cell;
use crate::history_cell::HistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;
@@ -28,7 +25,6 @@ use codex_core::config::edit::ConfigEdit;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::EventMsg;
@@ -40,7 +36,6 @@ use codex_core::protocol::SkillErrorInfo;
use codex_core::protocol::TokenUsage;
use codex_protocol::ConversationId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;
@@ -173,118 +168,6 @@ fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool
    }
}

fn target_preset_for_upgrade<'a>(
    available_models: &'a [ModelPreset],
    target_model: &str,
) -> Option<&'a ModelPreset> {
    available_models
        .iter()
        .find(|preset| preset.model == target_model)
}

async fn handle_model_migration_prompt_if_needed(
    tui: &mut tui::Tui,
    config: &mut Config,
    model: &str,
    app_event_tx: &AppEventSender,
    models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
    let available_models = models_manager.list_models(config).await;
    let upgrade = available_models
        .iter()
        .find(|preset| preset.model == model)
        .and_then(|preset| preset.upgrade.as_ref());

    if let Some(ModelUpgrade {
        id: target_model,
        reasoning_effort_mapping,
        migration_config_key,
        model_link,
        upgrade_copy,
    }) = upgrade
    {
        if migration_prompt_hidden(config, migration_config_key.as_str()) {
            return None;
        }

        let target_model = target_model.to_string();
        if !should_show_model_migration_prompt(
            model,
            &target_model,
            &config.notices.model_migrations,
            &available_models,
        ) {
            return None;
        }

        let current_preset = available_models.iter().find(|preset| preset.model == model);
        let target_preset = target_preset_for_upgrade(&available_models, &target_model);
        let target_preset = target_preset?;
        let target_display_name = target_preset.display_name.clone();
        let heading_label = if target_display_name == model {
            target_model.clone()
        } else {
            target_display_name.clone()
        };
        let target_description =
            (!target_preset.description.is_empty()).then(|| target_preset.description.clone());
        let can_opt_out = current_preset.is_some();
        let prompt_copy = migration_copy_for_models(
            model,
            &target_model,
            model_link.clone(),
            upgrade_copy.clone(),
            heading_label,
            target_description,
            can_opt_out,
        );
        match run_model_migration_prompt(tui, prompt_copy).await {
            ModelMigrationOutcome::Accepted => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
                config.model = Some(target_model.clone());

                let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
                    && let Some(reasoning_effort) = config.model_reasoning_effort
                {
                    reasoning_effort_mapping
                        .get(&reasoning_effort)
                        .cloned()
                        .or(config.model_reasoning_effort)
                } else {
                    config.model_reasoning_effort
                };

                config.model_reasoning_effort = mapped_effort;

                app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
                app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
                app_event_tx.send(AppEvent::PersistModelSelection {
                    model: target_model.clone(),
                    effort: mapped_effort,
                });
            }
            ModelMigrationOutcome::Rejected => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
            }
            ModelMigrationOutcome::Exit => {
                return Some(AppExitInfo {
                    token_usage: TokenUsage::default(),
                    conversation_id: None,
                    update_action: None,
                });
            }
        }
    }

    None
}

pub(crate) struct App {
    pub(crate) server: Arc<ConversationManager>,
    pub(crate) app_event_tx: AppEventSender,
@@ -332,11 +215,122 @@ impl App {
        }
    }

    async fn on_session_configured_model(&mut self, model: String) {
        self.maybe_emit_pending_model_migration_notice(model.as_str())
            .await;
        self.spawn_schedule_model_migration_notice(model);
    }

    async fn maybe_emit_pending_model_migration_notice(&mut self, used_model: &str) {
        let pending =
            match crate::model_migration_notice::read_pending_model_migration_notice(&self.config)
                .await
            {
                Ok(Some(pending)) => pending,
                Ok(None) => return,
                Err(err) => {
                    tracing::error!(
                        error = %err,
                        "failed to read pending model migration notice"
                    );
                    return;
                }
            };

        let should_show = pending.from_model == used_model;
        if should_show {
            let message = format!(
                "Recommended model upgrade: switch from {} to {}. Run /model to change.",
                pending.from_model, pending.to_model
            );
            self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
                crate::history_cell::new_warning_event(message),
            )));

            self.config
                .notices
                .model_migrations
                .insert(pending.from_model.clone(), pending.to_model.clone());
        }

        if should_show
            && let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
                .record_model_migration_seen(pending.from_model.as_str(), pending.to_model.as_str())
                .apply()
                .await
        {
            tracing::error!(
                error = %err,
                "failed to persist model migration notice acknowledgement"
            );
            self.chat_widget.add_error_message(format!(
                "Failed to persist model migration notice acknowledgement: {err}"
            ));
        }

        if let Err(err) =
            crate::model_migration_notice::clear_pending_model_migration_notice(&self.config).await
        {
            tracing::error!(
                error = %err,
                "failed to clear pending model migration notice file"
            );
            self.chat_widget.add_error_message(format!(
                "Failed to clear pending model migration notice: {err}"
            ));
        }
    }

    fn spawn_schedule_model_migration_notice(&self, used_model: String) {
        let config = self.config.clone();
        let models_manager = self.server.get_models_manager();

        tokio::spawn(async move {
            // Build the candidate migration notice using the current models list (remote if
            // refreshed, otherwise the startup fallback seeded in `ModelsManager`).
            let available_models = models_manager.list_models(&config).await;
            let upgrade = available_models
                .iter()
                .find(|preset| preset.model == used_model)
                .and_then(|preset| preset.upgrade.as_ref());
            let Some(upgrade) = upgrade else {
                return;
            };

            if migration_prompt_hidden(&config, upgrade.migration_config_key.as_str()) {
                return;
            }

            let target_model = upgrade.id.clone();
            if !should_show_model_migration_prompt(
                used_model.as_str(),
                target_model.as_str(),
                &config.notices.model_migrations,
                &available_models,
            ) {
                return;
            }

            if let Err(err) = crate::model_migration_notice::write_pending_model_migration_notice(
                &config,
                used_model.as_str(),
                target_model.as_str(),
            )
            .await
            {
                tracing::error!(
                    error = %err,
                    "failed to persist pending model migration notice"
                );
            }
        });
    }

    #[allow(clippy::too_many_arguments)]
    pub async fn run(
        tui: &mut tui::Tui,
        auth_manager: Arc<AuthManager>,
        mut config: Config,
        config: Config,
        active_profile: Option<String>,
        initial_prompt: Option<String>,
        initial_images: Vec<PathBuf>,
@@ -352,30 +346,8 @@ impl App {
            auth_manager.clone(),
            SessionSource::Cli,
        ));
        let mut model = conversation_manager
            .get_models_manager()
            .get_model(&config.model, &config)
            .await;
        let exit_info = handle_model_migration_prompt_if_needed(
            tui,
            &mut config,
            model.as_str(),
            &app_event_tx,
            conversation_manager.get_models_manager(),
        )
        .await;
        if let Some(exit_info) = exit_info {
            return Ok(exit_info);
        }
        if let Some(updated_model) = config.model.clone() {
            model = updated_model;
        }

        let enhanced_keys_supported = tui.enhanced_keys_supported();
        let model_family = conversation_manager
            .get_models_manager()
            .construct_model_family(model.as_str(), &config)
            .await;
        let mut chat_widget = match resume_selection {
            ResumeSelection::StartFresh | ResumeSelection::Exit => {
                let init = crate::chatwidget::ChatWidgetInit {
@@ -389,7 +361,6 @@ impl App {
                    models_manager: conversation_manager.get_models_manager(),
                    feedback: feedback.clone(),
                    is_first_run,
                    model_family: model_family.clone(),
                };
                ChatWidget::new(init, conversation_manager.clone())
            }
@@ -415,7 +386,6 @@ impl App {
                    models_manager: conversation_manager.get_models_manager(),
                    feedback: feedback.clone(),
                    is_first_run,
                    model_family: model_family.clone(),
                };
                ChatWidget::new_from_existing(
                    init,
@@ -437,7 +407,7 @@ impl App {
            chat_widget,
            auth_manager: auth_manager.clone(),
            config,
            current_model: model.clone(),
            current_model: String::new(),
            active_profile,
            file_search,
            enhanced_keys_supported,
@@ -559,11 +529,6 @@ impl App {
    }

    async fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
        let model_family = self
            .server
            .get_models_manager()
            .construct_model_family(self.current_model.as_str(), &self.config)
            .await;
        match event {
            AppEvent::NewSession => {
                let summary = session_summary(
@@ -582,10 +547,9 @@ impl App {
                    models_manager: self.server.get_models_manager(),
                    feedback: self.feedback.clone(),
                    is_first_run: false,
                    model_family: model_family.clone(),
                };
                self.chat_widget = ChatWidget::new(init, self.server.clone());
                self.current_model = model_family.get_model_slug().to_string();
                self.current_model.clear();
                if let Some(summary) = summary {
                    let mut lines: Vec<Line<'static>> = vec![summary.usage_line.clone().into()];
                    if let Some(command) = summary.resume_command {
@@ -632,14 +596,17 @@ impl App {
                    models_manager: self.server.get_models_manager(),
                    feedback: self.feedback.clone(),
                    is_first_run: false,
                    model_family: model_family.clone(),
                };
                self.chat_widget = ChatWidget::new_from_existing(
                    init,
                    resumed.conversation,
                    resumed.session_configured,
                );
                self.current_model = model_family.get_model_slug().to_string();
                self.current_model = self
                    .chat_widget
                    .get_model_family()
                    .map(|mf| mf.get_model_slug().to_string())
                    .unwrap_or_default();
                if let Some(summary) = summary {
                    let mut lines: Vec<Line<'static>> =
                        vec![summary.usage_line.clone().into()];
@@ -722,12 +689,21 @@ impl App {
                    self.suppress_shutdown_complete = false;
                    return Ok(true);
                }

                let configured_model = match &event.msg {
                    EventMsg::SessionConfigured(ev) => Some(ev.model_family.slug.clone()),
                    _ => None,
                };
                if let EventMsg::ListSkillsResponse(response) = &event.msg {
                    let cwd = self.chat_widget.config_ref().cwd.clone();
                    let errors = errors_for_cwd(&cwd, response);
                    emit_skill_load_warnings(&self.app_event_tx, &errors);
                }
                self.chat_widget.handle_codex_event(event);

                if let Some(model) = configured_model {
                    self.on_session_configured_model(model).await;
                }
            }
            AppEvent::ConversationHistory(ev) => {
                self.on_conversation_history_for_backtrack(tui, ev).await?;
@@ -1066,24 +1042,6 @@ impl App {
                    ));
                }
            }
            AppEvent::PersistModelMigrationPromptAcknowledged {
                from_model,
                to_model,
            } => {
                if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
                    .record_model_migration_seen(from_model.as_str(), to_model.as_str())
                    .apply()
                    .await
                {
                    tracing::error!(
                        error = %err,
                        "failed to persist model migration prompt acknowledgement"
                    );
                    self.chat_widget.add_error_message(format!(
                        "Failed to save model migration prompt preference: {err}"
                    ));
                }
            }
            AppEvent::OpenApprovalsPopup => {
                self.chat_widget.open_approvals_popup();
            }

@@ -1349,15 +1307,47 @@ mod tests {
    use codex_core::protocol::SandboxPolicy;
    use codex_core::protocol::SessionConfiguredEvent;
    use codex_protocol::ConversationId;
    use codex_protocol::openai_models::ModelUpgrade;
    use ratatui::prelude::Line;
    use std::path::PathBuf;
    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;

    fn test_model_family(slug: &str) -> codex_protocol::openai_models::ModelFamily {
        use codex_protocol::openai_models::ConfigShellToolType;
        use codex_protocol::openai_models::ModelFamily;
        use codex_protocol::openai_models::ReasoningEffort;
        use codex_protocol::openai_models::ReasoningSummaryFormat;
        use codex_protocol::openai_models::TruncationPolicy;

        ModelFamily {
            slug: slug.to_string(),
            family: slug.to_string(),
            needs_special_apply_patch_instructions: false,
            context_window: None,
            auto_compact_token_limit: None,
            supports_reasoning_summaries: false,
            default_reasoning_effort: Some(ReasoningEffort::Medium),
            reasoning_summary_format: ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: String::new(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
            default_verbosity: None,
            shell_type: ConfigShellToolType::Default,
            truncation_policy: TruncationPolicy::Bytes(10_000),
        }
    }

    async fn make_test_app() -> App {
        let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = chat_widget.get_model_family().get_model_slug().to_string();
        let current_model = chat_widget
            .get_model_family()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let server = Arc::new(ConversationManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),
@@ -1396,7 +1386,10 @@ mod tests {
    ) {
        let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = chat_widget.get_model_family().get_model_slug().to_string();
        let current_model = chat_widget
            .get_model_family()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let server = Arc::new(ConversationManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),
@@ -1514,7 +1507,11 @@ mod tests {
            &available,
        ));

        assert!(target_preset_for_upgrade(&available, "missing-target").is_none());
        assert!(
            !available
                .iter()
                .any(|preset| preset.model == "missing-target")
        );
    }

    #[tokio::test]
@@ -1555,7 +1552,7 @@ mod tests {
        let make_header = |is_first| {
            let event = SessionConfiguredEvent {
                session_id: ConversationId::new(),
                model: "gpt-test".to_string(),
                model_family: test_model_family("gpt-test"),
                model_provider_id: "test-provider".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,
@@ -1610,7 +1607,7 @@ mod tests {
        let conversation_id = ConversationId::new();
        let event = SessionConfiguredEvent {
            session_id: conversation_id,
            model: "gpt-test".to_string(),
            model_family: test_model_family("gpt-test"),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,

@@ -338,10 +338,9 @@ impl App {
    ) {
        let conv = new_conv.conversation;
        let session_configured = new_conv.session_configured;
        let model_family = self.chat_widget.get_model_family();
        let current_model = session_configured.model_family.slug.clone();
        let init = crate::chatwidget::ChatWidgetInit {
            config: cfg,
            model_family: model_family.clone(),
            frame_requester: tui.frame_requester(),
            app_event_tx: self.app_event_tx.clone(),
            initial_prompt: None,
@@ -354,7 +353,7 @@ impl App {
        };
        self.chat_widget =
            crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured);
        self.current_model = model_family.get_model_slug().to_string();
        self.current_model = current_model;
        // Trim transcript up to the selected user message and re-render it.
        self.trim_transcript_for_backtrack(nth_user_message);
        self.render_transcript_once(tui);

@@ -143,12 +143,6 @@ pub(crate) enum AppEvent {
    /// Persist the acknowledgement flag for the rate limit switch prompt.
    PersistRateLimitSwitchPromptHidden,

    /// Persist the acknowledgement flag for the model migration prompt.
    PersistModelMigrationPromptAcknowledged {
        from_model: String,
        to_model: String,
    },

    /// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    SkipNextWorldWritableScan,

@@ -15,7 +15,6 @@ use codex_core::features::Feature;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::local_git_branches;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_family::ModelFamily;
use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;
@@ -67,6 +66,7 @@ use codex_core::skills::model::SkillMetadata;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ElicitationRequestEvent;
use codex_protocol::openai_models::ModelFamily;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::user_input::UserInput;
use crossterm::event::KeyCode;
@@ -291,7 +291,6 @@ pub(crate) struct ChatWidgetInit {
    pub(crate) models_manager: Arc<ModelsManager>,
    pub(crate) feedback: codex_feedback::CodexFeedback,
    pub(crate) is_first_run: bool,
    pub(crate) model_family: ModelFamily,
}

#[derive(Default)]
@@ -316,7 +315,7 @@ pub(crate) struct ChatWidget {
    bottom_pane: BottomPane,
    active_cell: Option<Box<dyn HistoryCell>>,
    config: Config,
    model_family: ModelFamily,
    model_family: Option<ModelFamily>,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
    session_header: SessionHeader,
@@ -432,7 +431,12 @@ impl ChatWidget {
        self.conversation_id = Some(event.session_id);
        self.current_rollout_path = Some(event.rollout_path.clone());
        let initial_messages = event.initial_messages.clone();
        let model_for_header = event.model.clone();
        let model_family = codex_core::models_manager::model_family::with_config_overrides(
            event.model_family.clone(),
            &self.config,
        );
        let model_for_header = model_family.get_model_slug().to_string();
        self.model_family = Some(model_family);
        self.session_header.set_model(&model_for_header);
        self.add_to_history(history_cell::new_session_info(
            &self.config,
@@ -449,9 +453,15 @@ impl ChatWidget {
            cwds: Vec::new(),
            force_reload: false,
        });
        let had_initial_message = self.initial_user_message.is_some();
        if let Some(user_message) = self.initial_user_message.take() {
            self.submit_user_message(user_message);
        }
        if !had_initial_message {
            // If there are queued inputs from startup, begin the first turn now.
            // Subsequent queued inputs are sent turn-by-turn via `maybe_send_next_queued_input`.
            self.maybe_send_next_queued_input();
        }
        if !self.suppress_session_configured_redraw {
            self.request_redraw();
        }
@@ -530,7 +540,11 @@ impl ChatWidget {
    }

    fn on_agent_reasoning_final(&mut self) {
        let reasoning_summary_format = self.get_model_family().reasoning_summary_format;
        let reasoning_summary_format = self
            .model_family
            .as_ref()
            .map(|mf| mf.reasoning_summary_format.clone())
            .unwrap_or_default();
        // At the end of a reasoning block, record transcript-only content.
        self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
        if !self.full_reasoning_buffer.is_empty() {
@@ -605,7 +619,7 @@ impl ChatWidget {

    fn context_remaining_percent(&self, info: &TokenUsageInfo) -> Option<i64> {
        info.model_context_window
            .or(self.model_family.context_window)
            .or(self.model_family.as_ref().and_then(|mf| mf.context_window))
            .map(|window| {
                info.last_token_usage
                    .percent_of_context_window_remaining(window)
@@ -677,7 +691,10 @@ impl ChatWidget {

        if high_usage
            && !self.rate_limit_switch_prompt_hidden()
            && self.model_family.get_model_slug() != NUDGE_MODEL_SLUG
            && self
                .model_family
                .as_ref()
                .is_some_and(|mf| mf.get_model_slug() != NUDGE_MODEL_SLUG)
            && !matches!(
                self.rate_limit_switch_prompt,
                RateLimitSwitchPromptState::Shown
@@ -711,7 +728,7 @@ impl ChatWidget {
        self.stream_controller = None;
        self.maybe_show_pending_rate_limit_prompt();
    }
    pub(crate) fn get_model_family(&self) -> ModelFamily {
    pub(crate) fn get_model_family(&self) -> Option<ModelFamily> {
        self.model_family.clone()
    }

@@ -1416,11 +1433,8 @@ impl ChatWidget {
            models_manager,
            feedback,
            is_first_run,
            model_family,
        } = common;
        let model_slug = model_family.get_model_slug().to_string();
        let mut config = config;
        config.model = Some(model_slug.clone());
        let config = config;
        let mut rng = rand::rng();
        let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();
        let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager);
@@ -1441,10 +1455,10 @@ impl ChatWidget {
            }),
            active_cell: None,
            config,
            model_family,
            model_family: None,
            auth_manager,
            models_manager,
            session_header: SessionHeader::new(model_slug),
            session_header: SessionHeader::new("Starting...".to_string()),
            initial_user_message: create_initial_user_message(
                initial_prompt.unwrap_or_default(),
                initial_images,
@@ -1502,9 +1516,12 @@ impl ChatWidget {
            auth_manager,
            models_manager,
            feedback,
            model_family,
            ..
        } = common;
        let model_family = codex_core::models_manager::model_family::with_config_overrides(
            session_configured.model_family.clone(),
            &config,
        );
        let model_slug = model_family.get_model_slug().to_string();
        let mut rng = rand::rng();
        let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();
@@ -1528,7 +1545,7 @@ impl ChatWidget {
            }),
            active_cell: None,
            config,
            model_family,
            model_family: Some(model_family),
            auth_manager,
            models_manager,
            session_header: SessionHeader::new(model_slug),
@@ -1723,7 +1740,7 @@ impl ChatWidget {
                    return;
                }
                const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
                self.submit_user_message(INIT_PROMPT.to_string().into());
                self.queue_user_message(INIT_PROMPT.to_string().into());
            }
            SlashCommand::Compact => {
                self.clear_token_usage();
@@ -1733,7 +1750,14 @@ impl ChatWidget {
                self.open_review_popup();
            }
            SlashCommand::Model => {
                self.open_model_popup();
                if self.model_family.is_none() {
                    self.add_info_message(
                        "`/model` is unavailable until startup finishes.".to_string(),
                        None,
                    );
                } else {
                    self.open_model_popup();
                }
            }
            SlashCommand::Approvals => {
                self.open_approvals_popup();
@@ -1904,7 +1928,7 @@ impl ChatWidget {
    }

    fn queue_user_message(&mut self, user_message: UserMessage) {
        if self.bottom_pane.is_task_running() {
        if self.conversation_id.is_none() || self.bottom_pane.is_task_running() {
            self.queued_user_messages.push_back(user_message);
            self.refresh_queued_user_messages();
        } else {
@@ -2213,7 +2237,7 @@ impl ChatWidget {

    // If idle and there are queued inputs, submit exactly one to start the next turn.
    fn maybe_send_next_queued_input(&mut self) {
        if self.bottom_pane.is_task_running() {
        if self.conversation_id.is_none() || self.bottom_pane.is_task_running() {
            return;
        }
        if let Some(user_message) = self.queued_user_messages.pop_front() {
@@ -2242,6 +2266,14 @@ impl ChatWidget {
    }

    pub(crate) fn add_status_output(&mut self) {
        let Some(model_family) = self.model_family.as_ref() else {
            self.add_info_message(
                "`/status` is unavailable until startup finishes.".to_string(),
                None,
            );
            return;
        };

        let default_usage = TokenUsage::default();
        let (total_usage, context_usage) = if let Some(ti) = &self.token_info {
            (&ti.total_token_usage, Some(&ti.last_token_usage))
@@ -2251,14 +2283,14 @@ impl ChatWidget {
        self.add_to_history(crate::status::new_status_output(
            &self.config,
            self.auth_manager.as_ref(),
            &self.model_family,
            model_family,
            total_usage,
            context_usage,
            &self.conversation_id,
            self.rate_limit_snapshot.as_ref(),
            self.plan_type,
            Local::now(),
            self.model_family.get_model_slug(),
            model_family.get_model_slug(),
        ));
    }

@@ -2411,7 +2443,14 @@ impl ChatWidget {
    /// Open a popup to choose a quick auto model. Selecting "All models"
    /// opens the full picker with every available preset.
    pub(crate) fn open_model_popup(&mut self) {
        let current_model = self.model_family.get_model_slug().to_string();
        let Some(model_family) = self.model_family.as_ref() else {
            self.add_info_message(
                "`/model` is unavailable until startup finishes.".to_string(),
                None,
            );
            return;
        };
        let current_model = model_family.get_model_slug().to_string();
        let presets: Vec<ModelPreset> =
            // todo(aibrahim): make this async function
            match self.models_manager.try_list_models(&self.config) {
@@ -2519,7 +2558,11 @@ impl ChatWidget {
            return;
        }

        let current_model = self.model_family.get_model_slug().to_string();
        let current_model = self
            .model_family
            .as_ref()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let mut items: Vec<SelectionItem> = Vec::new();
        for preset in presets.into_iter() {
            let description =
@@ -2650,7 +2693,10 @@ impl ChatWidget {
                .or(Some(default_effort));

            let model_slug = preset.model.to_string();
            let is_current_model = self.model_family.get_model_slug() == preset.model;
            let is_current_model = self
                .model_family
                .as_ref()
                .is_some_and(|mf| mf.get_model_slug() == preset.model);
            let highlight_choice = if is_current_model {
                self.config.model_reasoning_effort
            } else {
@@ -3242,7 +3288,7 @@ impl ChatWidget {
    /// Set the model in the widget's config copy.
    pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) {
        self.session_header.set_model(model);
        self.model_family = model_family;
        self.model_family = Some(model_family);
    }

    pub(crate) fn add_info_message(&mut self, message: String, hint: Option<String>) {

@@ -97,6 +97,34 @@ fn snapshot(percent: f64) -> RateLimitSnapshot {
    }
}

fn test_model_family(slug: &str) -> ModelFamily {
    use codex_protocol::openai_models::ConfigShellToolType;
    use codex_protocol::openai_models::ModelFamily;
    use codex_protocol::openai_models::ReasoningEffort;
    use codex_protocol::openai_models::ReasoningSummaryFormat;
    use codex_protocol::openai_models::TruncationPolicy;

    ModelFamily {
        slug: slug.to_string(),
        family: slug.to_string(),
        needs_special_apply_patch_instructions: false,
        context_window: None,
        auto_compact_token_limit: None,
        supports_reasoning_summaries: false,
        default_reasoning_effort: Some(ReasoningEffort::default()),
        reasoning_summary_format: ReasoningSummaryFormat::None,
        supports_parallel_tool_calls: false,
        apply_patch_tool_type: None,
        base_instructions: String::new(),
        experimental_supported_tools: Vec::new(),
        effective_context_window_percent: 95,
        support_verbosity: false,
        default_verbosity: None,
        shell_type: ConfigShellToolType::Default,
        truncation_policy: TruncationPolicy::Bytes(10_000),
    }
}

#[tokio::test]
async fn resumed_initial_messages_render_history() {
    let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await;
@@ -105,7 +133,7 @@ async fn resumed_initial_messages_render_history() {
    let rollout_file = NamedTempFile::new().unwrap();
    let configured = codex_core::protocol::SessionConfiguredEvent {
        session_id: conversation_id,
        model: "test-model".to_string(),
        model_family: test_model_family("test-model"),
        model_provider_id: "test-provider".to_string(),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::ReadOnly,
@@ -312,8 +340,6 @@ async fn helpers_are_available_and_do_not_panic() {
    let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
    let tx = AppEventSender::new(tx_raw);
    let cfg = test_config().await;
    let resolved_model = ModelsManager::get_model_offline(cfg.model.as_deref());
    let model_family = ModelsManager::construct_model_family_offline(&resolved_model, &cfg);
    let conversation_manager = Arc::new(ConversationManager::with_models_provider(
        CodexAuth::from_api_key("test"),
        cfg.model_provider.clone(),
@@ -330,7 +356,6 @@ async fn helpers_are_available_and_do_not_panic() {
        models_manager: conversation_manager.get_models_manager(),
        feedback: codex_feedback::CodexFeedback::new(),
        is_first_run: true,
        model_family,
    };
    let mut w = ChatWidget::new(init, conversation_manager);
    // Basic construction sanity.
@@ -372,7 +397,10 @@ async fn make_chatwidget_manual(
        bottom_pane: bottom,
        active_cell: None,
        config: cfg.clone(),
        model_family: ModelsManager::construct_model_family_offline(&resolved_model, &cfg),
        model_family: Some(ModelsManager::construct_model_family_offline(
            &resolved_model,
            &cfg,
        )),
        auth_manager: auth_manager.clone(),
        models_manager: Arc::new(ModelsManager::new(auth_manager)),
        session_header: SessionHeader::new(resolved_model.clone()),
@@ -1023,6 +1051,27 @@ async fn alt_up_edits_most_recent_queued_message() {
async fn enqueueing_history_prompt_multiple_times_is_stable() {
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;

    // Ensure the session is configured so the first submission is sent (not queued),
    // which seeds the prompt history for the subsequent Up-arrow recalls.
    let conversation_id = ConversationId::new();
    let rollout_file = NamedTempFile::new().unwrap();
    chat.handle_codex_event(Event {
        id: "configured".into(),
        msg: EventMsg::SessionConfigured(codex_core::protocol::SessionConfiguredEvent {
            session_id: conversation_id,
            model_family: test_model_family("test-model"),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            cwd: PathBuf::from("/home/user/project"),
            reasoning_effort: Some(ReasoningEffortConfig::default()),
            history_log_id: 0,
            history_entry_count: 0,
            initial_messages: None,
            rollout_path: rollout_file.path().to_path_buf(),
        }),
    });

    // Submit an initial prompt to seed history.
    chat.bottom_pane.set_composer_text("repeat me".to_string());
    chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));

@@ -868,13 +868,14 @@ pub(crate) fn new_session_info(
    is_first_event: bool,
) -> SessionInfoCell {
    let SessionConfiguredEvent {
        model,
        model_family,
        reasoning_effort,
        ..
    } = event;
    let used_model = model_family.slug;
    // Header box rendered as history (so it appears at the very top)
    let header = SessionHeaderHistoryCell::new(
        model.clone(),
        used_model.clone(),
        reasoning_effort,
        config.cwd.clone(),
        CODEX_CLI_VERSION,
@@ -922,11 +923,11 @@ pub(crate) fn new_session_info(
    {
        parts.push(Box::new(tooltips));
    }
    if requested_model != model {
    if requested_model != used_model {
        let lines = vec![
            "model changed:".magenta().bold().into(),
            format!("requested: {requested_model}").into(),
            format!("used: {model}").into(),
            format!("used: {used_model}").into(),
        ];
        parts.push(Box::new(PlainHistoryCell { lines }));
    }

@@ -58,6 +58,9 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
pub mod model_migration_notice;
// Model migration prompt UI is no longer used in production; keep it for snapshot tests.
#[cfg(test)]
mod model_migration;
mod notifications;
pub mod onboarding;

@@ -1,9 +1,12 @@
#![allow(dead_code)]

use crate::key_hint;
use crate::render::Insets;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::Renderable;
use crate::render::renderable::RenderableExt as _;
use crate::selection_list::selection_option_row;

use crate::tui::FrameRequester;
use crate::tui::Tui;
use crate::tui::TuiEvent;

codex-rs/tui/src/model_migration_notice.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
use codex_core::config::Config;
use serde::Deserialize;
use serde::Serialize;
use std::path::PathBuf;

/// Pending "show on next run" model migration notice.
///
/// This is persisted outside `config.toml` to avoid growing the config file with
/// ephemeral UI state.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct PendingModelMigrationNotice {
    pub from_model: String,
    pub to_model: String,
}

const MODEL_MIGRATION_NOTICE_FILENAME: &str = "model_migration_notice.json";

fn pending_model_migration_notice_filepath(config: &Config) -> PathBuf {
    config.codex_home.join(MODEL_MIGRATION_NOTICE_FILENAME)
}

/// Read a pending notice if present.
///
/// Returns `Ok(None)` if no pending notice is scheduled.
pub async fn read_pending_model_migration_notice(
    config: &Config,
) -> anyhow::Result<Option<PendingModelMigrationNotice>> {
    let path = pending_model_migration_notice_filepath(config);
    let contents = match tokio::fs::read_to_string(&path).await {
        Ok(contents) => contents,
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(err) => return Err(err.into()),
    };
    Ok(Some(serde_json::from_str(contents.trim())?))
}

/// Persist a pending notice to be displayed on the next run.
pub async fn write_pending_model_migration_notice(
    config: &Config,
    from_model: &str,
    to_model: &str,
) -> anyhow::Result<()> {
    let path = pending_model_migration_notice_filepath(config);
    let notice = PendingModelMigrationNotice {
        from_model: from_model.to_string(),
        to_model: to_model.to_string(),
    };
    let json_line = format!("{}\n", serde_json::to_string(&notice)?);
    if let Some(parent) = path.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }
    tokio::fs::write(path, json_line).await?;
    Ok(())
}

/// Clear any pending notice.
pub async fn clear_pending_model_migration_notice(config: &Config) -> anyhow::Result<()> {
    let path = pending_model_migration_notice_filepath(config);
    match tokio::fs::remove_file(path).await {
        Ok(()) => Ok(()),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
        Err(err) => Err(err.into()),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use codex_core::config::ConfigBuilder;
    use pretty_assertions::assert_eq;

    #[tokio::test]
    async fn write_read_clear_round_trips() {
        let tmp = tempfile::tempdir().expect("tmpdir");
        let config = ConfigBuilder::default()
            .codex_home(tmp.path().to_path_buf())
            .build()
            .await
            .expect("config");

        assert_eq!(
            read_pending_model_migration_notice(&config).await.unwrap(),
            None
        );

        write_pending_model_migration_notice(&config, "gpt-5", "gpt-5.1")
            .await
            .expect("write");

        assert_eq!(
            read_pending_model_migration_notice(&config)
                .await
                .expect("read"),
            Some(PendingModelMigrationNotice {
                from_model: "gpt-5".to_string(),
                to_model: "gpt-5.1".to_string(),
            })
        );

        clear_pending_model_migration_notice(&config)
            .await
            .expect("clear");

        assert_eq!(
            read_pending_model_migration_notice(&config).await.unwrap(),
            None
        );
    }
}
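
For reference (derived from the serde derives above, not part of the diff itself): the notice is persisted as a single JSON line at $CODEX_HOME/model_migration_notice.json, so with the values used in the round-trip test the file would contain:

    {"from_model":"gpt-5","to_model":"gpt-5.1"}
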
@@ -7,12 +7,12 @@ use chrono::DateTime;
use chrono::Local;
use codex_common::create_config_summary_entries;
use codex_core::config::Config;
use codex_core::models_manager::model_family::ModelFamily;
use codex_core::protocol::NetworkAccess;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol::TokenUsage;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::openai_models::ModelFamily;
use ratatui::prelude::*;
use ratatui::style::Stylize;
use std::collections::BTreeSet;

@@ -8,13 +8,13 @@ use codex_core::AuthManager;
use codex_core::config::Config;
use codex_core::config::ConfigBuilder;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_family::ModelFamily;
use codex_core::protocol::CreditsSnapshot;
use codex_core::protocol::RateLimitSnapshot;
use codex_core::protocol::RateLimitWindow;
use codex_core::protocol::SandboxPolicy;
use codex_core::protocol::TokenUsage;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::openai_models::ModelFamily;
use codex_protocol::openai_models::ReasoningEffort;
use insta::assert_snapshot;
use ratatui::prelude::*;

@@ -10,9 +10,6 @@ use crate::exec_command::strip_bash_lc_and_escape;
use crate::file_search::FileSearchManager;
use crate::history_cell::HistoryCell;
use crate::history_cell::UserHistoryCell;
use crate::model_migration::ModelMigrationOutcome;
use crate::model_migration::migration_copy_for_models;
use crate::model_migration::run_model_migration_prompt;
use crate::pager_overlay::Overlay;
use crate::render::highlight::highlight_bash_to_lines;
use crate::render::renderable::Renderable;

@@ -41,7 +38,6 @@ use codex_core::config::Config;
use codex_core::config::edit::ConfigEditsBuilder;
#[cfg(target_os = "windows")]
use codex_core::features::Feature;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG;
use codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG;
use codex_core::protocol::EventMsg;

@@ -54,7 +50,6 @@ use codex_core::protocol::TokenUsage;
use codex_core::terminal::terminal_info;
use codex_protocol::ConversationId;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelUpgrade;
use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig;
use color_eyre::eyre::Result;
use color_eyre::eyre::WrapErr;

@@ -207,115 +202,6 @@ fn migration_prompt_hidden(config: &Config, migration_config_key: &str) -> bool
    }
}

async fn handle_model_migration_prompt_if_needed(
    tui: &mut tui::Tui,
    config: &mut Config,
    model: &str,
    app_event_tx: &AppEventSender,
    models_manager: Arc<ModelsManager>,
) -> Option<AppExitInfo> {
    let available_models = models_manager.list_models(config).await;
    let upgrade = available_models
        .iter()
        .find(|preset| preset.model == model)
        .and_then(|preset| preset.upgrade.as_ref());

    if let Some(ModelUpgrade {
        id: target_model,
        reasoning_effort_mapping,
        migration_config_key,
        ..
    }) = upgrade
    {
        if migration_prompt_hidden(config, migration_config_key.as_str()) {
            return None;
        }

        let target_model = target_model.to_string();
        if !should_show_model_migration_prompt(
            model,
            &target_model,
            &config.notices.model_migrations,
            &available_models,
        ) {
            return None;
        }

        let current_preset = available_models.iter().find(|preset| preset.model == model);
        let target_preset = available_models
            .iter()
            .find(|preset| preset.model == target_model);
        let target_display_name = target_preset
            .map(|preset| preset.display_name.clone())
            .unwrap_or_else(|| target_model.clone());
        let heading_label = if target_display_name == model {
            target_model.clone()
        } else {
            target_display_name.clone()
        };
        let target_description = target_preset.and_then(|preset| {
            if preset.description.is_empty() {
                None
            } else {
                Some(preset.description.clone())
            }
        });
        let can_opt_out = current_preset.is_some();
        let prompt_copy = migration_copy_for_models(
            model,
            &target_model,
            heading_label,
            target_description,
            can_opt_out,
        );
        match run_model_migration_prompt(tui, prompt_copy).await {
            ModelMigrationOutcome::Accepted => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
                config.model = Some(target_model.clone());

                let mapped_effort = if let Some(reasoning_effort_mapping) = reasoning_effort_mapping
                    && let Some(reasoning_effort) = config.model_reasoning_effort
                {
                    reasoning_effort_mapping
                        .get(&reasoning_effort)
                        .cloned()
                        .or(config.model_reasoning_effort)
                } else {
                    config.model_reasoning_effort
                };

                config.model_reasoning_effort = mapped_effort;

                app_event_tx.send(AppEvent::UpdateModel(target_model.clone()));
                app_event_tx.send(AppEvent::UpdateReasoningEffort(mapped_effort));
                app_event_tx.send(AppEvent::PersistModelSelection {
                    model: target_model.clone(),
                    effort: mapped_effort,
                });
            }
            ModelMigrationOutcome::Rejected => {
                app_event_tx.send(AppEvent::PersistModelMigrationPromptAcknowledged {
                    from_model: model.to_string(),
                    to_model: target_model.clone(),
                });
            }
            ModelMigrationOutcome::Exit => {
                return Some(AppExitInfo {
                    token_usage: TokenUsage::default(),
                    conversation_id: None,
                    update_action: None,
                    session_lines: Vec::new(),
                });
            }
        }
    }

    None
}

pub(crate) struct App {
    pub(crate) server: Arc<ConversationManager>,
    pub(crate) app_event_tx: AppEventSender,

@@ -373,11 +259,125 @@ impl App {
        }
    }

    async fn on_session_configured_model(&mut self, model: String) {
        self.maybe_emit_pending_model_migration_notice(model.as_str())
            .await;
        self.spawn_schedule_model_migration_notice(model);
    }

    async fn maybe_emit_pending_model_migration_notice(&mut self, used_model: &str) {
        let pending = match codex_tui::model_migration_notice::read_pending_model_migration_notice(
            &self.config,
        )
        .await
        {
            Ok(Some(pending)) => pending,
            Ok(None) => return,
            Err(err) => {
                tracing::error!(
                    error = %err,
                    "failed to read pending model migration notice"
                );
                return;
            }
        };

        let should_show = pending.from_model == used_model;
        if should_show {
            let message = format!(
                "Recommended model upgrade: switch from {} to {}. Run /model to change.",
                pending.from_model, pending.to_model
            );
            self.app_event_tx.send(AppEvent::InsertHistoryCell(Box::new(
                crate::history_cell::new_warning_event(message),
            )));

            self.config
                .notices
                .model_migrations
                .insert(pending.from_model.clone(), pending.to_model.clone());
        }

        if should_show
            && let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
                .record_model_migration_seen(pending.from_model.as_str(), pending.to_model.as_str())
                .apply()
                .await
        {
            tracing::error!(
                error = %err,
                "failed to persist model migration notice acknowledgement"
            );
            self.chat_widget.add_error_message(format!(
                "Failed to persist model migration notice acknowledgement: {err}"
            ));
        }

        if let Err(err) =
            codex_tui::model_migration_notice::clear_pending_model_migration_notice(&self.config)
                .await
        {
            tracing::error!(
                error = %err,
                "failed to clear pending model migration notice file"
            );
            self.chat_widget.add_error_message(format!(
                "Failed to clear pending model migration notice: {err}"
            ));
        }
    }

    fn spawn_schedule_model_migration_notice(&self, used_model: String) {
        let config = self.config.clone();
        let models_manager = self.server.get_models_manager();

        tokio::spawn(async move {
            // Build the candidate migration notice using the current models list (remote if
            // refreshed, otherwise the startup fallback seeded in `ModelsManager`).
            let available_models = models_manager.list_models(&config).await;
            let upgrade = available_models
                .iter()
                .find(|preset| preset.model == used_model)
                .and_then(|preset| preset.upgrade.as_ref());
            let Some(upgrade) = upgrade else {
                return;
            };

            if migration_prompt_hidden(&config, upgrade.migration_config_key.as_str()) {
                return;
            }

            let target_model = upgrade.id.clone();
            if !should_show_model_migration_prompt(
                used_model.as_str(),
                target_model.as_str(),
                &config.notices.model_migrations,
                &available_models,
            ) {
                return;
            }

            if let Err(err) =
                codex_tui::model_migration_notice::write_pending_model_migration_notice(
                    &config,
                    used_model.as_str(),
                    target_model.as_str(),
                )
                .await
            {
                tracing::error!(
                    error = %err,
                    "failed to persist pending model migration notice"
                );
            }
        });
    }

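Taken together, the two methods above form a two-run handshake; a minimal summary sketch (inferred from the added code, using the names it defines, with "gpt-5" as a hypothetical model):

    // Run N:   on_session_configured_model("gpt-5")
    //            -> spawn_schedule_model_migration_notice: finds an upgrade preset
    //               in the background and writes the pending notice file.
    // Run N+1: on_session_configured_model("gpt-5")
    //            -> maybe_emit_pending_model_migration_notice: from_model matches,
    //               so it inserts the warning cell, records the acknowledgement via
    //               ConfigEditsBuilder, and clears the notice file either way.
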
#[allow(clippy::too_many_arguments)]
pub async fn run(
    tui: &mut tui::Tui,
    auth_manager: Arc<AuthManager>,
    mut config: Config,
    config: Config,
    active_profile: Option<String>,
    initial_prompt: Option<String>,
    initial_images: Vec<PathBuf>,

@@ -393,30 +393,8 @@ impl App {
        auth_manager.clone(),
        SessionSource::Cli,
    ));
    let mut model = conversation_manager
        .get_models_manager()
        .get_model(&config.model, &config)
        .await;
    let exit_info = handle_model_migration_prompt_if_needed(
        tui,
        &mut config,
        model.as_str(),
        &app_event_tx,
        conversation_manager.get_models_manager(),
    )
    .await;
    if let Some(exit_info) = exit_info {
        return Ok(exit_info);
    }
    if let Some(updated_model) = config.model.clone() {
        model = updated_model;
    }

    let enhanced_keys_supported = tui.enhanced_keys_supported();
    let model_family = conversation_manager
        .get_models_manager()
        .construct_model_family(model.as_str(), &config)
        .await;
    let mut chat_widget = match resume_selection {
        ResumeSelection::StartFresh | ResumeSelection::Exit => {
            let init = crate::chatwidget::ChatWidgetInit {

@@ -430,7 +408,6 @@ impl App {
                models_manager: conversation_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model_family: model_family.clone(),
            };
            ChatWidget::new(init, conversation_manager.clone())
        }

@@ -456,7 +433,6 @@ impl App {
                models_manager: conversation_manager.get_models_manager(),
                feedback: feedback.clone(),
                is_first_run,
                model_family: model_family.clone(),
            };
            ChatWidget::new_from_existing(
                init,

@@ -494,7 +470,7 @@ impl App {
            chat_widget,
            auth_manager: auth_manager.clone(),
            config,
            current_model: model.clone(),
            current_model: String::new(),
            active_profile,
            file_search,
            enhanced_keys_supported,

@@ -1449,11 +1425,6 @@ impl App {
    }

    async fn handle_event(&mut self, tui: &mut tui::Tui, event: AppEvent) -> Result<bool> {
        let model_family = self
            .server
            .get_models_manager()
            .construct_model_family(self.current_model.as_str(), &self.config)
            .await;
        match event {
            AppEvent::NewSession => {
                let summary = session_summary(

@@ -1472,10 +1443,9 @@ impl App {
                    models_manager: self.server.get_models_manager(),
                    feedback: self.feedback.clone(),
                    is_first_run: false,
                    model_family: model_family.clone(),
                };
                self.chat_widget = ChatWidget::new(init, self.server.clone());
                self.current_model = model_family.get_model_slug().to_string();
                self.current_model.clear();
                if let Some(summary) = summary {
                    let mut lines: Vec<Line<'static>> = vec![summary.usage_line.clone().into()];
                    if let Some(command) = summary.resume_command {

@@ -1522,14 +1492,17 @@ impl App {
                    models_manager: self.server.get_models_manager(),
                    feedback: self.feedback.clone(),
                    is_first_run: false,
                    model_family: model_family.clone(),
                };
                self.chat_widget = ChatWidget::new_from_existing(
                    init,
                    resumed.conversation,
                    resumed.session_configured,
                );
                self.current_model = model_family.get_model_slug().to_string();
                self.current_model = self
                    .chat_widget
                    .get_model_family()
                    .map(|mf| mf.get_model_slug().to_string())
                    .unwrap_or_default();
                if let Some(summary) = summary {
                    let mut lines: Vec<Line<'static>> =
                        vec![summary.usage_line.clone().into()];

@@ -1610,12 +1583,21 @@ impl App {
                    self.suppress_shutdown_complete = false;
                    return Ok(true);
                }

                let configured_model = match &event.msg {
                    EventMsg::SessionConfigured(ev) => Some(ev.model_family.slug.clone()),
                    _ => None,
                };
                if let EventMsg::ListSkillsResponse(response) = &event.msg {
                    let cwd = self.chat_widget.config_ref().cwd.clone();
                    let errors = errors_for_cwd(&cwd, response);
                    emit_skill_load_warnings(&self.app_event_tx, &errors);
                }
                self.chat_widget.handle_codex_event(event);

                if let Some(model) = configured_model {
                    self.on_session_configured_model(model).await;
                }
            }
            AppEvent::ConversationHistory(ev) => {
                self.on_conversation_history_for_backtrack(tui, ev).await?;

@@ -1913,24 +1895,6 @@ impl App {
                ));
            }
        }
        AppEvent::PersistModelMigrationPromptAcknowledged {
            from_model,
            to_model,
        } => {
            if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
                .record_model_migration_seen(from_model.as_str(), to_model.as_str())
                .apply()
                .await
            {
                tracing::error!(
                    error = %err,
                    "failed to persist model migration prompt acknowledgement"
                );
                self.chat_widget.add_error_message(format!(
                    "Failed to save model migration prompt preference: {err}"
                ));
            }
        }
        AppEvent::OpenApprovalsPopup => {
            self.chat_widget.open_approvals_popup();
        }

@@ -2205,10 +2169,41 @@ mod tests {
    use std::sync::Arc;
    use std::sync::atomic::AtomicBool;

    fn test_model_family(slug: &str) -> codex_protocol::openai_models::ModelFamily {
        use codex_protocol::openai_models::ConfigShellToolType;
        use codex_protocol::openai_models::ModelFamily;
        use codex_protocol::openai_models::ReasoningEffort;
        use codex_protocol::openai_models::ReasoningSummaryFormat;
        use codex_protocol::openai_models::TruncationPolicy;

        ModelFamily {
            slug: slug.to_string(),
            family: slug.to_string(),
            needs_special_apply_patch_instructions: false,
            context_window: None,
            auto_compact_token_limit: None,
            supports_reasoning_summaries: false,
            default_reasoning_effort: Some(ReasoningEffort::Medium),
            reasoning_summary_format: ReasoningSummaryFormat::None,
            supports_parallel_tool_calls: false,
            apply_patch_tool_type: None,
            base_instructions: String::new(),
            experimental_supported_tools: Vec::new(),
            effective_context_window_percent: 95,
            support_verbosity: false,
            default_verbosity: None,
            shell_type: ConfigShellToolType::Default,
            truncation_policy: TruncationPolicy::Bytes(10_000),
        }
    }

    async fn make_test_app() -> App {
        let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = chat_widget.get_model_family().get_model_slug().to_string();
        let current_model = chat_widget
            .get_model_family()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let server = Arc::new(ConversationManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),

@@ -2256,7 +2251,10 @@ mod tests {
    ) {
        let (chat_widget, app_event_tx, rx, op_rx) = make_chatwidget_manual_with_sender().await;
        let config = chat_widget.config_ref().clone();
        let current_model = chat_widget.get_model_family().get_model_slug().to_string();
        let current_model = chat_widget
            .get_model_family()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let server = Arc::new(ConversationManager::with_models_provider(
            CodexAuth::from_api_key("Test API Key"),
            config.model_provider.clone(),

@@ -2426,7 +2424,7 @@ mod tests {
        let make_header = |is_first| {
            let event = SessionConfiguredEvent {
                session_id: ConversationId::new(),
                model: "gpt-test".to_string(),
                model_family: test_model_family("gpt-test"),
                model_provider_id: "test-provider".to_string(),
                approval_policy: AskForApproval::Never,
                sandbox_policy: SandboxPolicy::ReadOnly,

@@ -2721,7 +2719,7 @@ mod tests {
        let conversation_id = ConversationId::new();
        let event = SessionConfiguredEvent {
            session_id: conversation_id,
            model: "gpt-test".to_string(),
            model_family: test_model_family("gpt-test"),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,

@@ -338,10 +338,9 @@ impl App {
    ) {
        let conv = new_conv.conversation;
        let session_configured = new_conv.session_configured;
        let model_family = self.chat_widget.get_model_family();
        let current_model = session_configured.model_family.slug.clone();
        let init = crate::chatwidget::ChatWidgetInit {
            config: cfg,
            model_family: model_family.clone(),
            frame_requester: tui.frame_requester(),
            app_event_tx: self.app_event_tx.clone(),
            initial_prompt: None,

@@ -354,7 +353,7 @@ impl App {
        };
        self.chat_widget =
            crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured);
        self.current_model = model_family.get_model_slug().to_string();
        self.current_model = current_model;
        // Trim transcript up to the selected user message and re-render it.
        self.trim_transcript_for_backtrack(nth_user_message);
        self.render_transcript_once(tui);

@@ -137,12 +137,6 @@ pub(crate) enum AppEvent {
    /// Persist the acknowledgement flag for the rate limit switch prompt.
    PersistRateLimitSwitchPromptHidden,

    /// Persist the acknowledgement flag for the model migration prompt.
    PersistModelMigrationPromptAcknowledged {
        from_model: String,
        to_model: String,
    },

    /// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
    #[cfg_attr(not(target_os = "windows"), allow(dead_code))]
    SkipNextWorldWritableScan,

@@ -13,7 +13,6 @@ use codex_core::config::types::Notifications;
use codex_core::git_info::current_branch_name;
use codex_core::git_info::local_git_branches;
use codex_core::models_manager::manager::ModelsManager;
use codex_core::models_manager::model_family::ModelFamily;
use codex_core::project_doc::DEFAULT_PROJECT_DOC_FILENAME;
use codex_core::protocol::AgentMessageDeltaEvent;
use codex_core::protocol::AgentMessageEvent;

@@ -65,6 +64,7 @@ use codex_core::skills::model::SkillMetadata;
use codex_protocol::ConversationId;
use codex_protocol::account::PlanType;
use codex_protocol::approvals::ElicitationRequestEvent;
use codex_protocol::openai_models::ModelFamily;
use codex_protocol::parse_command::ParsedCommand;
use codex_protocol::user_input::UserInput;
use crossterm::event::KeyCode;

@@ -267,7 +267,6 @@ pub(crate) struct ChatWidgetInit {
    pub(crate) models_manager: Arc<ModelsManager>,
    pub(crate) feedback: codex_feedback::CodexFeedback,
    pub(crate) is_first_run: bool,
    pub(crate) model_family: ModelFamily,
}

#[derive(Default)]

@@ -284,7 +283,7 @@ pub(crate) struct ChatWidget {
    bottom_pane: BottomPane,
    active_cell: Option<Box<dyn HistoryCell>>,
    config: Config,
    model_family: ModelFamily,
    model_family: Option<ModelFamily>,
    auth_manager: Arc<AuthManager>,
    models_manager: Arc<ModelsManager>,
    session_header: SessionHeader,

@@ -398,7 +397,12 @@ impl ChatWidget {
        self.conversation_id = Some(event.session_id);
        self.current_rollout_path = Some(event.rollout_path.clone());
        let initial_messages = event.initial_messages.clone();
        let model_for_header = event.model.clone();
        let model_family = codex_core::models_manager::model_family::with_config_overrides(
            event.model_family.clone(),
            &self.config,
        );
        let model_for_header = model_family.get_model_slug().to_string();
        self.model_family = Some(model_family);
        self.session_header.set_model(&model_for_header);
        self.add_to_history(history_cell::new_session_info(
            &self.config,

@@ -415,9 +419,15 @@ impl ChatWidget {
            cwds: Vec::new(),
            force_reload: false,
        });
        let had_initial_message = self.initial_user_message.is_some();
        if let Some(user_message) = self.initial_user_message.take() {
            self.submit_user_message(user_message);
        }
        if !had_initial_message {
            // If there are queued inputs from startup, begin the first turn now.
            // Subsequent queued inputs are sent turn-by-turn via `maybe_send_next_queued_input`.
            self.maybe_send_next_queued_input();
        }
        if !self.suppress_session_configured_redraw {
            self.request_redraw();
        }

@@ -496,7 +506,11 @@ impl ChatWidget {
    }

    fn on_agent_reasoning_final(&mut self) {
        let reasoning_summary_format = self.get_model_family().reasoning_summary_format;
        let reasoning_summary_format = self
            .model_family
            .as_ref()
            .map(|mf| mf.reasoning_summary_format.clone())
            .unwrap_or_default();
        // At the end of a reasoning block, record transcript-only content.
        self.full_reasoning_buffer.push_str(&self.reasoning_buffer);
        if !self.full_reasoning_buffer.is_empty() {

@@ -570,7 +584,7 @@ impl ChatWidget {

    fn context_remaining_percent(&self, info: &TokenUsageInfo) -> Option<i64> {
        info.model_context_window
            .or(self.model_family.context_window)
            .or(self.model_family.as_ref().and_then(|mf| mf.context_window))
            .map(|window| {
                info.last_token_usage
                    .percent_of_context_window_remaining(window)

@@ -642,7 +656,10 @@ impl ChatWidget {

        if high_usage
            && !self.rate_limit_switch_prompt_hidden()
            && self.model_family.get_model_slug() != NUDGE_MODEL_SLUG
            && self
                .model_family
                .as_ref()
                .is_some_and(|mf| mf.get_model_slug() != NUDGE_MODEL_SLUG)
            && !matches!(
                self.rate_limit_switch_prompt,
                RateLimitSwitchPromptState::Shown

@@ -676,7 +693,7 @@ impl ChatWidget {
        self.stream_controller = None;
        self.maybe_show_pending_rate_limit_prompt();
    }
    pub(crate) fn get_model_family(&self) -> ModelFamily {
    pub(crate) fn get_model_family(&self) -> Option<ModelFamily> {
        self.model_family.clone()
    }

@@ -1276,11 +1293,8 @@ impl ChatWidget {
            models_manager,
            feedback,
            is_first_run,
            model_family,
        } = common;
        let model_slug = model_family.get_model_slug().to_string();
        let mut config = config;
        config.model = Some(model_slug.clone());
        let config = config;
        let mut rng = rand::rng();
        let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();
        let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager);

@@ -1301,10 +1315,10 @@ impl ChatWidget {
            }),
            active_cell: None,
            config,
            model_family,
            model_family: None,
            auth_manager,
            models_manager,
            session_header: SessionHeader::new(model_slug),
            session_header: SessionHeader::new("Starting...".to_string()),
            initial_user_message: create_initial_user_message(
                initial_prompt.unwrap_or_default(),
                initial_images,

@@ -1360,9 +1374,12 @@ impl ChatWidget {
            auth_manager,
            models_manager,
            feedback,
            model_family,
            ..
        } = common;
        let model_family = codex_core::models_manager::model_family::with_config_overrides(
            session_configured.model_family.clone(),
            &config,
        );
        let model_slug = model_family.get_model_slug().to_string();
        let mut rng = rand::rng();
        let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string();

@@ -1386,7 +1403,7 @@ impl ChatWidget {
            }),
            active_cell: None,
            config,
            model_family,
            model_family: Some(model_family),
            auth_manager,
            models_manager,
            session_header: SessionHeader::new(model_slug),

@@ -1554,7 +1571,7 @@ impl ChatWidget {
                    return;
                }
                const INIT_PROMPT: &str = include_str!("../prompt_for_init_command.md");
                self.submit_user_message(INIT_PROMPT.to_string().into());
                self.queue_user_message(INIT_PROMPT.to_string().into());
            }
            SlashCommand::Compact => {
                self.clear_token_usage();

@@ -1564,7 +1581,14 @@ impl ChatWidget {
                self.open_review_popup();
            }
            SlashCommand::Model => {
                self.open_model_popup();
                if self.model_family.is_none() {
                    self.add_info_message(
                        "`/model` is unavailable until startup finishes.".to_string(),
                        None,
                    );
                } else {
                    self.open_model_popup();
                }
            }
            SlashCommand::Approvals => {
                self.open_approvals_popup();

@@ -1707,7 +1731,7 @@ impl ChatWidget {
    }

    fn queue_user_message(&mut self, user_message: UserMessage) {
        if self.bottom_pane.is_task_running() {
        if self.conversation_id.is_none() || self.bottom_pane.is_task_running() {
            self.queued_user_messages.push_back(user_message);
            self.refresh_queued_user_messages();
        } else {

@@ -2016,7 +2040,7 @@ impl ChatWidget {

    // If idle and there are queued inputs, submit exactly one to start the next turn.
    fn maybe_send_next_queued_input(&mut self) {
        if self.conversation_id.is_none() || self.bottom_pane.is_task_running() {
            return;
        }
        if let Some(user_message) = self.queued_user_messages.pop_front() {

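Both guards now treat a not-yet-configured session like a running task; a minimal timeline sketch (inferred from the gating above together with the SessionConfigured handler earlier in the diff):

    // t0: user submits while conversation_id is None -> queue_user_message()
    //     pushes the message onto queued_user_messages.
    // t1: SessionConfigured arrives -> conversation_id is set; if no initial
    //     prompt was pending, maybe_send_next_queued_input() pops exactly one
    //     queued message to start the first turn.
    // t2: subsequent queued messages are drained one per completed turn.
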
@@ -2045,6 +2069,14 @@ impl ChatWidget {
    }

    pub(crate) fn add_status_output(&mut self) {
        let Some(model_family) = self.model_family.as_ref() else {
            self.add_info_message(
                "`/status` is unavailable until startup finishes.".to_string(),
                None,
            );
            return;
        };

        let default_usage = TokenUsage::default();
        let (total_usage, context_usage) = if let Some(ti) = &self.token_info {
            (&ti.total_token_usage, Some(&ti.last_token_usage))

@@ -2054,14 +2086,14 @@ impl ChatWidget {
        self.add_to_history(crate::status::new_status_output(
            &self.config,
            self.auth_manager.as_ref(),
            &self.model_family,
            model_family,
            total_usage,
            context_usage,
            &self.conversation_id,
            self.rate_limit_snapshot.as_ref(),
            self.plan_type,
            Local::now(),
            self.model_family.get_model_slug(),
            model_family.get_model_slug(),
        ));
    }
    fn stop_rate_limit_poller(&mut self) {

@@ -2204,7 +2236,14 @@ impl ChatWidget {
    /// Open a popup to choose a quick auto model. Selecting "All models"
    /// opens the full picker with every available preset.
    pub(crate) fn open_model_popup(&mut self) {
        let current_model = self.model_family.get_model_slug().to_string();
        let Some(model_family) = self.model_family.as_ref() else {
            self.add_info_message(
                "`/model` is unavailable until startup finishes.".to_string(),
                None,
            );
            return;
        };
        let current_model = model_family.get_model_slug().to_string();
        let presets: Vec<ModelPreset> =
            // todo(aibrahim): make this async function
            match self.models_manager.try_list_models(&self.config) {

@@ -2312,7 +2351,11 @@ impl ChatWidget {
            return;
        }

        let current_model = self.model_family.get_model_slug().to_string();
        let current_model = self
            .model_family
            .as_ref()
            .map(|mf| mf.get_model_slug().to_string())
            .unwrap_or_default();
        let mut items: Vec<SelectionItem> = Vec::new();
        for preset in presets.into_iter() {
            let description =

@@ -2443,7 +2486,10 @@ impl ChatWidget {
            .or(Some(default_effort));

        let model_slug = preset.model.to_string();
        let is_current_model = self.model_family.get_model_slug() == preset.model;
        let is_current_model = self
            .model_family
            .as_ref()
            .is_some_and(|mf| mf.get_model_slug() == preset.model);
        let highlight_choice = if is_current_model {
            self.config.model_reasoning_effort
        } else {

@@ -3004,7 +3050,7 @@ impl ChatWidget {
    /// Set the model in the widget's config copy.
    pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) {
        self.session_header.set_model(model);
        self.model_family = model_family;
        self.model_family = Some(model_family);
    }

    pub(crate) fn add_info_message(&mut self, message: String, hint: Option<String>) {

@@ -95,6 +95,34 @@ fn snapshot(percent: f64) -> RateLimitSnapshot {
    }
}

fn test_model_family(slug: &str) -> ModelFamily {
    use codex_protocol::openai_models::ConfigShellToolType;
    use codex_protocol::openai_models::ModelFamily;
    use codex_protocol::openai_models::ReasoningEffort;
    use codex_protocol::openai_models::ReasoningSummaryFormat;
    use codex_protocol::openai_models::TruncationPolicy;

    ModelFamily {
        slug: slug.to_string(),
        family: slug.to_string(),
        needs_special_apply_patch_instructions: false,
        context_window: None,
        auto_compact_token_limit: None,
        supports_reasoning_summaries: false,
        default_reasoning_effort: Some(ReasoningEffort::default()),
        reasoning_summary_format: ReasoningSummaryFormat::None,
        supports_parallel_tool_calls: false,
        apply_patch_tool_type: None,
        base_instructions: String::new(),
        experimental_supported_tools: Vec::new(),
        effective_context_window_percent: 95,
        support_verbosity: false,
        default_verbosity: None,
        shell_type: ConfigShellToolType::Default,
        truncation_policy: TruncationPolicy::Bytes(10_000),
    }
}

#[tokio::test]
async fn resumed_initial_messages_render_history() {
    let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await;

@@ -103,7 +131,7 @@ async fn resumed_initial_messages_render_history() {
    let rollout_file = NamedTempFile::new().unwrap();
    let configured = codex_core::protocol::SessionConfiguredEvent {
        session_id: conversation_id,
        model: "test-model".to_string(),
        model_family: test_model_family("test-model"),
        model_provider_id: "test-provider".to_string(),
        approval_policy: AskForApproval::Never,
        sandbox_policy: SandboxPolicy::ReadOnly,

@@ -310,8 +338,6 @@ async fn helpers_are_available_and_do_not_panic() {
    let (tx_raw, _rx) = unbounded_channel::<AppEvent>();
    let tx = AppEventSender::new(tx_raw);
    let cfg = test_config().await;
    let resolved_model = ModelsManager::get_model_offline(cfg.model.as_deref());
    let model_family = ModelsManager::construct_model_family_offline(&resolved_model, &cfg);
    let conversation_manager = Arc::new(ConversationManager::with_models_provider(
        CodexAuth::from_api_key("test"),
        cfg.model_provider.clone(),

@@ -328,7 +354,6 @@ async fn helpers_are_available_and_do_not_panic() {
        models_manager: conversation_manager.get_models_manager(),
        feedback: codex_feedback::CodexFeedback::new(),
        is_first_run: true,
        model_family,
    };
    let mut w = ChatWidget::new(init, conversation_manager);
    // Basic construction sanity.

@@ -370,7 +395,10 @@ async fn make_chatwidget_manual(
        bottom_pane: bottom,
        active_cell: None,
        config: cfg.clone(),
        model_family: ModelsManager::construct_model_family_offline(&resolved_model, &cfg),
        model_family: Some(ModelsManager::construct_model_family_offline(
            &resolved_model,
            &cfg,
        )),
        auth_manager: auth_manager.clone(),
        models_manager: Arc::new(ModelsManager::new(auth_manager)),
        session_header: SessionHeader::new(resolved_model.clone()),

@@ -983,6 +1011,27 @@ async fn alt_up_edits_most_recent_queued_message() {
async fn enqueueing_history_prompt_multiple_times_is_stable() {
    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;

    // Ensure the session is configured so the first submission is sent (not queued),
    // which seeds the prompt history for the subsequent Up-arrow recalls.
    let conversation_id = ConversationId::new();
    let rollout_file = NamedTempFile::new().unwrap();
    chat.handle_codex_event(Event {
        id: "configured".into(),
        msg: EventMsg::SessionConfigured(codex_core::protocol::SessionConfiguredEvent {
            session_id: conversation_id,
            model_family: test_model_family("test-model"),
            model_provider_id: "test-provider".to_string(),
            approval_policy: AskForApproval::Never,
            sandbox_policy: SandboxPolicy::ReadOnly,
            cwd: PathBuf::from("/home/user/project"),
            reasoning_effort: Some(ReasoningEffortConfig::default()),
            history_log_id: 0,
            history_entry_count: 0,
            initial_messages: None,
            rollout_path: rollout_file.path().to_path_buf(),
        }),
    });

    // Submit an initial prompt to seed history.
    chat.bottom_pane.set_composer_text("repeat me".to_string());
    chat.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE));

@@ -626,13 +626,14 @@ pub(crate) fn new_session_info(
    is_first_event: bool,
) -> SessionInfoCell {
    let SessionConfiguredEvent {
        model,
        model_family,
        reasoning_effort,
        ..
    } = event;
    let used_model = model_family.slug;
    // Header box rendered as history (so it appears at the very top)
    let header = SessionHeaderHistoryCell::new(
        model.clone(),
        used_model.clone(),
        reasoning_effort,
        config.cwd.clone(),
        CODEX_CLI_VERSION,

@@ -680,11 +681,11 @@ pub(crate) fn new_session_info(
    {
        parts.push(Box::new(tooltips));
    }
    if requested_model != model {
    if requested_model != used_model {
        let lines = vec![
            "model changed:".magenta().bold().into(),
            format!("requested: {requested_model}").into(),
            format!("used: {model}").into(),
            format!("used: {used_model}").into(),
        ];
        parts.push(Box::new(PlainHistoryCell { lines }));
    }

@@ -58,6 +58,8 @@ pub mod live_wrap;
mod markdown;
mod markdown_render;
mod markdown_stream;
// Model migration prompt UI is no longer used in production; keep it for snapshot tests.
#[cfg(test)]
mod model_migration;
mod notifications;
pub mod onboarding;

@@ -77,8 +79,6 @@ mod style;
mod terminal_palette;
mod text_formatting;
mod tooltips;
mod transcript_copy;
mod transcript_selection;
mod tui;
mod ui_consts;
pub mod update_action;

@@ -280,10 +280,7 @@ pub async fn run_main(

    let file_layer = tracing_subscriber::fmt::layer()
        .with_writer(non_blocking)
        // `with_target(true)` is the default, but we previously disabled it for file output.
        // Keep it enabled so we can selectively enable targets via `RUST_LOG=...` and then
        // grep for a specific module/target while troubleshooting.
        .with_target(true)
        .with_target(false)
        .with_ansi(false)
        .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
        .with_filter(env_filter());

@@ -25,7 +25,8 @@ pub struct VT100Backend {
impl VT100Backend {
    /// Creates a new `TestBackend` with the specified width and height.
    pub fn new(width: u16, height: u16) -> Self {
        crossterm::style::Colored::set_ansi_color_disabled(false);
        // Force ANSI color output even when the writer isn't a real TTY (e.g., vt100::Parser in tests).
        crossterm::style::force_color_output(true);
        Self {
            crossterm_backend: CrosstermBackend::new(vt100::Parser::new(height, width, 0)),
        }