Compare commits

...

1 Commit

Author SHA1 Message Date
Dylan Hurd
e410222b95 chore(feature) rm Feature::Personality 2026-02-09 11:49:25 -05:00
20 changed files with 72 additions and 271 deletions

View File

@@ -67,7 +67,7 @@ async fn turn_start_sends_originator_header() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -142,7 +142,7 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -234,7 +234,7 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -431,7 +431,7 @@ async fn turn_start_accepts_personality_override_v2() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -512,7 +512,7 @@ async fn turn_start_change_personality_mid_thread_v2() -> Result<()> {
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
@@ -618,7 +618,7 @@ async fn turn_start_uses_migrated_pragmatic_personality_without_override_v2() ->
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::from([(Feature::Personality, true)]),
&BTreeMap::from([]),
)?;
create_fake_rollout(
codex_home.path(),

View File

@@ -224,9 +224,6 @@
"memory_tool": {
"type": "boolean"
},
"personality": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},
@@ -1267,9 +1264,6 @@
"memory_tool": {
"type": "boolean"
},
"personality": {
"type": "boolean"
},
"powershell_utf8": {
"type": "boolean"
},

View File

@@ -1,5 +1,4 @@
use crate::client_common::tools::ToolSpec;
use crate::config::types::Personality;
use crate::error::Result;
pub use codex_api::common::ResponseEvent;
use codex_protocol::models::BaseInstructions;
@@ -37,9 +36,6 @@ pub struct Prompt {
pub base_instructions: BaseInstructions,
/// Optionally specify the personality of the model.
pub personality: Option<Personality>,
/// Optional the output schema for the model's response.
pub output_schema: Option<Value>,
}

View File

@@ -522,7 +522,7 @@ pub(crate) struct TurnContext {
pub(crate) compact_prompt: Option<String>,
pub(crate) user_instructions: Option<String>,
pub(crate) collaboration_mode: CollaborationMode,
pub(crate) personality: Option<Personality>,
pub(crate) personality: Personality,
pub(crate) approval_policy: AskForApproval,
pub(crate) sandbox_policy: SandboxPolicy,
pub(crate) windows_sandbox_level: WindowsSandboxLevel,
@@ -605,7 +605,7 @@ pub(crate) struct SessionConfiguration {
user_instructions: Option<String>,
/// Personality preference for the model.
personality: Option<Personality>,
personality: Personality,
/// Base instructions for the session.
base_instructions: String,
@@ -666,7 +666,7 @@ impl SessionConfiguration {
next_configuration.model_reasoning_summary = summary;
}
if let Some(personality) = updates.personality {
next_configuration.personality = Some(personality);
next_configuration.personality = personality;
}
if let Some(approval_policy) = updates.approval_policy {
next_configuration.approval_policy.set(approval_policy)?;
@@ -1564,20 +1564,14 @@ impl Session {
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
if !self.features.enabled(Feature::Personality) {
return None;
}
let previous = previous?;
if next.model_info.slug != previous.model_info.slug {
return None;
}
// if a personality is specified and it's different from the previous one, build a personality update item
if let Some(personality) = next.personality
&& next.personality != previous.personality
{
if next.personality != previous.personality {
let model_info = &next.model_info;
let personality_message = Self::personality_message_for(model_info, personality);
let personality_message = Self::personality_message_for(model_info, next.personality);
personality_message.map(|personality_message| {
DeveloperInstructions::personality_spec_message(personality_message).into()
})
@@ -1590,7 +1584,7 @@ impl Session {
model_info
.model_messages
.as_ref()
.and_then(|spec| spec.get_personality_message(Some(personality)))
.and_then(|spec| spec.get_personality_message(personality))
.filter(|message| !message.is_empty())
}
@@ -2180,21 +2174,17 @@ impl Session {
{
items.push(collab_instructions.into());
}
if self.features.enabled(Feature::Personality)
&& let Some(personality) = turn_context.personality
let model_info = turn_context.model_info.clone();
let has_baked_personality = model_info.supports_personality()
&& base_instructions == model_info.get_model_instructions(turn_context.personality);
if !has_baked_personality
&& let Some(personality_message) =
Self::personality_message_for(&model_info, turn_context.personality)
{
let model_info = turn_context.model_info.clone();
let has_baked_personality = model_info.supports_personality()
&& base_instructions == model_info.get_model_instructions(Some(personality));
if !has_baked_personality
&& let Some(personality_message) =
Self::personality_message_for(&model_info, personality)
{
items.push(
DeveloperInstructions::personality_spec_message(personality_message).into(),
);
}
items.push(DeveloperInstructions::personality_spec_message(personality_message).into());
}
if let Some(user_instructions) = turn_context.user_instructions.as_deref() {
items.push(
UserInstructions {
@@ -4136,7 +4126,6 @@ async fn run_sampling_request(
tools: router.specs(),
parallel_tool_calls: model_supports_parallel,
base_instructions,
personality: turn_context.personality,
output_schema: turn_context.final_output_json_schema.clone(),
};
@@ -4644,7 +4633,7 @@ async fn try_run_sampling_request(
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.model_info.slug.clone(),
personality: turn_context.personality,
personality: Some(turn_context.personality),
collaboration_mode: Some(collaboration_mode),
effort: turn_context.reasoning_effort,
summary: turn_context.reasoning_summary,

View File

@@ -24,7 +24,7 @@ pub struct ThreadConfigSnapshot {
pub sandbox_policy: SandboxPolicy,
pub cwd: PathBuf,
pub reasoning_effort: Option<ReasoningEffort>,
pub personality: Option<Personality>,
pub personality: Personality,
pub session_source: SessionSource,
}

View File

@@ -99,7 +99,7 @@ async fn run_compact_task_inner(
approval_policy: turn_context.approval_policy,
sandbox_policy: turn_context.sandbox_policy.clone(),
model: turn_context.model_info.slug.clone(),
personality: turn_context.personality,
personality: Some(turn_context.personality),
collaboration_mode: Some(collaboration_mode),
effort: turn_context.reasoning_effort,
summary: turn_context.reasoning_summary,
@@ -117,7 +117,6 @@ async fn run_compact_task_inner(
let prompt = Prompt {
input: turn_input,
base_instructions: sess.get_base_instructions().await,
personality: turn_context.personality,
..Default::default()
};
let attempt_result = drain_to_completed(

View File

@@ -86,7 +86,6 @@ async fn run_remote_compact_task_inner_impl(
tools: vec![],
parallel_tool_calls: false,
base_instructions,
personality: turn_context.personality,
output_schema: None,
};

View File

@@ -140,8 +140,8 @@ pub struct Config {
/// Info needed to make an API request to the model.
pub model_provider: ModelProviderInfo,
/// Optionally specify the personality of the model
pub personality: Option<Personality>,
/// How the model should communicate
pub personality: Personality,
/// Approval policy for executing commands.
pub approval_policy: Constrained<AskForApproval>,
@@ -1618,11 +1618,7 @@ impl Config {
let personality = personality
.or(config_profile.personality)
.or(cfg.personality)
.or_else(|| {
features
.enabled(Feature::Personality)
.then_some(Personality::Pragmatic)
});
.unwrap_or_default();
let experimental_compact_prompt_path = config_profile
.experimental_compact_prompt_file
@@ -4038,7 +4034,7 @@ model_verbosity = "high"
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_verbosity: None,
personality: Some(Personality::Pragmatic),
personality: Personality::Pragmatic,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
@@ -4127,7 +4123,7 @@ model_verbosity = "high"
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_verbosity: None,
personality: Some(Personality::Pragmatic),
personality: Personality::Pragmatic,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
@@ -4231,7 +4227,7 @@ model_verbosity = "high"
model_reasoning_summary: ReasoningSummary::default(),
model_supports_reasoning_summaries: None,
model_verbosity: None,
personality: Some(Personality::Pragmatic),
personality: Personality::Pragmatic,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,
@@ -4321,7 +4317,7 @@ model_verbosity = "high"
model_reasoning_summary: ReasoningSummary::Detailed,
model_supports_reasoning_summaries: None,
model_verbosity: Some(Verbosity::High),
personality: Some(Personality::Pragmatic),
personality: Personality::Pragmatic,
chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(),
base_instructions: None,
developer_instructions: None,

View File

@@ -88,9 +88,8 @@ impl ContextManager {
// This is a coarse lower bound, not a tokenizer-accurate count.
pub(crate) fn estimate_token_count(&self, turn_context: &TurnContext) -> Option<i64> {
let model_info = &turn_context.model_info;
let personality = turn_context.personality.or(turn_context.config.personality);
let base_instructions = BaseInstructions {
text: model_info.get_model_instructions(personality),
text: model_info.get_model_instructions(turn_context.personality),
};
self.estimate_token_count_with_base_instructions(&base_instructions)
}

View File

@@ -123,8 +123,6 @@ pub enum Feature {
Steer,
/// Enable collaboration modes (Plan, Default).
CollaborationModes,
/// Enable personality selection in the TUI.
Personality,
/// Use the Responses API WebSocket transport for OpenAI by default.
ResponsesWebsockets,
/// Enable Responses API websocket v2 mode.
@@ -551,12 +549,6 @@ pub const FEATURES: &[FeatureSpec] = &[
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::Personality,
key: "personality",
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::ResponsesWebsockets,
key: "responses_websockets",

View File

@@ -12,7 +12,6 @@ use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use crate::config::Config;
use crate::features::Feature;
use crate::truncate::approx_bytes_for_tokens;
use tracing::warn;
@@ -104,8 +103,6 @@ pub(crate) fn with_config_overrides(mut model: ModelInfo, config: &Config) -> Mo
if let Some(base_instructions) = &config.base_instructions {
model.base_instructions = base_instructions.clone();
model.model_messages = None;
} else if !config.features.enabled(Feature::Personality) {
model.model_messages = None;
}
model

View File

@@ -1,6 +1,5 @@
use anyhow::Result;
use codex_core::config::types::Personality;
use codex_core::features::Feature;
use codex_core::protocol::AskForApproval;
use codex_core::protocol::EventMsg;
use codex_core::protocol::Op;
@@ -110,11 +109,7 @@ async fn model_and_personality_change_only_appends_model_instructions() -> Resul
)
.await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.enable(Feature::Personality);
});
let mut builder = test_codex().with_model("gpt-5.2-codex");
let test = builder.build(&server).await?;
let next_model = "exp-codex-personality";

View File

@@ -44,8 +44,7 @@ const LOCAL_PRAGMATIC_TEMPLATE: &str = "You are a deeply pragmatic, effective so
async fn personality_does_not_mutate_base_instructions_without_template() {
let codex_home = TempDir::new().expect("create temp dir");
let mut config = load_default_config_for_test(&codex_home).await;
config.features.enable(Feature::Personality);
config.personality = Some(Personality::Friendly);
config.personality = Personality::Friendly;
let model_info = ModelsManager::construct_model_info_offline("gpt-5.1", &config);
assert_eq!(
@@ -58,8 +57,7 @@ async fn personality_does_not_mutate_base_instructions_without_template() {
async fn base_instructions_override_disables_personality_template() {
let codex_home = TempDir::new().expect("create temp dir");
let mut config = load_default_config_for_test(&codex_home).await;
config.features.enable(Feature::Personality);
config.personality = Some(Personality::Friendly);
config.personality = Personality::Friendly;
config.base_instructions = Some("override instructions".to_string());
let model_info = ModelsManager::construct_model_info_offline("gpt-5.2-codex", &config);
@@ -81,7 +79,6 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
});
let test = builder.build(&server).await?;
@@ -127,8 +124,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.personality = Some(Personality::Friendly);
config.personality = Personality::Friendly;
});
let test = builder.build(&server).await?;
@@ -181,8 +177,7 @@ async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.personality = Some(Personality::None);
config.personality = Personality::None;
});
let test = builder.build(&server).await?;
@@ -242,7 +237,6 @@ async fn default_personality_is_pragmatic_without_config_toml() -> anyhow::Resul
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
});
let test = builder.build(&server).await?;
@@ -290,7 +284,6 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
.with_model("exp-codex-personality")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
});
let test = builder.build(&server).await?;
@@ -386,8 +379,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
.with_model("exp-codex-personality")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.personality = Some(Personality::Pragmatic);
config.personality = Personality::Pragmatic;
});
let test = builder.build(&server).await?;
@@ -463,111 +455,6 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn instructions_uses_base_if_feature_disabled() -> anyhow::Result<()> {
let codex_home = TempDir::new().expect("create temp dir");
let mut config = load_default_config_for_test(&codex_home).await;
config.features.disable(Feature::Personality);
config.personality = Some(Personality::Friendly);
let model_info = ModelsManager::construct_model_info_offline("gpt-5.2-codex", &config);
assert_eq!(
model_info.get_model_instructions(config.personality),
model_info.base_instructions
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
.with_model("exp-codex-personality")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.disable(Feature::Personality);
});
let test = builder.build(&server).await?;
test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "hello".into(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: test.cwd_path().to_path_buf(),
approval_policy: test.config.approval_policy.value(),
sandbox_policy: SandboxPolicy::ReadOnly,
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
test.codex
.submit(Op::OverrideTurnContext {
cwd: None,
approval_policy: None,
sandbox_policy: None,
windows_sandbox_level: None,
model: None,
effort: None,
summary: None,
collaboration_mode: None,
personality: Some(Personality::Pragmatic),
})
.await?;
test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "hello".into(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: test.cwd_path().to_path_buf(),
approval_policy: test.config.approval_policy.value(),
sandbox_policy: SandboxPolicy::ReadOnly,
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;
wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
let requests = resp_mock.requests();
assert_eq!(requests.len(), 2, "expected two requests");
let request = requests
.last()
.expect("expected personality update request");
let developer_texts = request.message_input_texts("developer");
let personality_text = developer_texts
.iter()
.find(|text| text.contains("<personality_spec>"));
assert!(
personality_text.is_none(),
"expected no personality preamble, got {personality_text:?}"
);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));
@@ -629,9 +516,8 @@ async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Resul
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.model = Some(remote_slug.to_string());
config.personality = Some(Personality::Friendly);
config.personality = Personality::Friendly;
});
let test = builder.build(&server).await?;
@@ -745,9 +631,8 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
.with_config(|config| {
config.features.enable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.model = Some(remote_slug.to_string());
config.personality = Some(Personality::Friendly);
config.personality = Personality::Friendly;
});
let test = builder.build(&server).await?;
@@ -860,7 +745,6 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
.with_config(|config| {
config.features.enable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.model = Some("gpt-5.2-codex".to_string());
});
let test = builder.build(&server).await?;

View File

@@ -80,6 +80,7 @@ pub enum WindowsSandboxLevel {
#[derive(
Debug,
Default,
Serialize,
Deserialize,
Clone,
@@ -98,6 +99,7 @@ pub enum WindowsSandboxLevel {
pub enum Personality {
None,
Friendly,
#[default]
Pragmatic,
}

View File

@@ -265,7 +265,7 @@ impl ModelInfo {
.is_some_and(ModelMessages::supports_personality)
}
pub fn get_model_instructions(&self, personality: Option<Personality>) -> String {
pub fn get_model_instructions(&self, personality: Personality) -> String {
if let Some(model_messages) = &self.model_messages
&& let Some(template) = &model_messages.instructions_template
{
@@ -274,15 +274,13 @@ impl ModelInfo {
.get_personality_message(personality)
.unwrap_or_default();
template.replace(PERSONALITY_PLACEHOLDER, personality_message.as_str())
} else if let Some(personality) = personality {
} else {
warn!(
model = %self.slug,
%personality,
"Model personality requested but model_messages is missing, falling back to base instructions."
);
self.base_instructions.clone()
} else {
self.base_instructions.clone()
}
}
}
@@ -311,7 +309,7 @@ impl ModelMessages {
.is_some_and(ModelInstructionsVariables::is_complete)
}
pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
pub fn get_personality_message(&self, personality: Personality) -> Option<String> {
self.instructions_variables
.as_ref()
.and_then(|variables| variables.get_personality_message(personality))
@@ -332,15 +330,11 @@ impl ModelInstructionsVariables {
&& self.personality_pragmatic.is_some()
}
pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
if let Some(personality) = personality {
match personality {
Personality::None => Some(String::new()),
Personality::Friendly => self.personality_friendly.clone(),
Personality::Pragmatic => self.personality_pragmatic.clone(),
}
} else {
self.personality_default.clone()
pub fn get_personality_message(&self, personality: Personality) -> Option<String> {
match personality {
Personality::None => self.personality_default.clone(),
Personality::Friendly => self.personality_friendly.clone(),
Personality::Pragmatic => self.personality_pragmatic.clone(),
}
}
}
@@ -524,7 +518,7 @@ mod tests {
instructions_variables: Some(personality_variables()),
}));
let instructions = model.get_model_instructions(Some(Personality::Friendly));
let instructions = model.get_model_instructions(Personality::Friendly);
assert_eq!(instructions, "Hello friendly");
}
@@ -540,18 +534,14 @@ mod tests {
}),
}));
assert_eq!(
model.get_model_instructions(Some(Personality::Friendly)),
model.get_model_instructions(Personality::Friendly),
"Hello\nfriendly"
);
assert_eq!(
model.get_model_instructions(Some(Personality::Pragmatic)),
model.get_model_instructions(Personality::Pragmatic),
"Hello\n"
);
assert_eq!(
model.get_model_instructions(Some(Personality::None)),
"Hello\n"
);
assert_eq!(model.get_model_instructions(None), "Hello\n");
assert_eq!(model.get_model_instructions(Personality::None), "Hello\n");
let model_no_personality = test_model(Some(ModelMessages {
instructions_template: Some("Hello\n{{ personality }}".to_string()),
@@ -562,18 +552,17 @@ mod tests {
}),
}));
assert_eq!(
model_no_personality.get_model_instructions(Some(Personality::Friendly)),
model_no_personality.get_model_instructions(Personality::Friendly),
"Hello\n"
);
assert_eq!(
model_no_personality.get_model_instructions(Some(Personality::Pragmatic)),
model_no_personality.get_model_instructions(Personality::Pragmatic),
"Hello\n"
);
assert_eq!(
model_no_personality.get_model_instructions(Some(Personality::None)),
model_no_personality.get_model_instructions(Personality::None),
"Hello\n"
);
assert_eq!(model_no_personality.get_model_instructions(None), "Hello\n");
}
#[test]
@@ -587,7 +576,7 @@ mod tests {
}),
}));
let instructions = model.get_model_instructions(Some(Personality::Friendly));
let instructions = model.get_model_instructions(Personality::Friendly);
assert_eq!(instructions, "base");
}
@@ -596,7 +585,7 @@ mod tests {
fn get_personality_message_returns_default_when_personality_is_none() {
let personality_template = personality_variables();
assert_eq!(
personality_template.get_personality_message(None),
personality_template.get_personality_message(Personality::None),
Some("default".to_string())
);
}
@@ -605,11 +594,11 @@ mod tests {
fn get_personality_message() {
let personality_variables = personality_variables();
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Friendly)),
personality_variables.get_personality_message(Personality::Friendly),
Some("friendly".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
personality_variables.get_personality_message(Personality::Pragmatic),
Some("pragmatic".to_string())
);
assert_eq!(
@@ -627,21 +616,17 @@ mod tests {
personality_pragmatic: None,
};
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Friendly)),
personality_variables.get_personality_message(Personality::Friendly),
None
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
personality_variables.get_personality_message(Personality::Pragmatic),
None
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
personality_variables.get_personality_message(Personality::None),
Some(String::new())
);
assert_eq!(
personality_variables.get_personality_message(None),
Some("default".to_string())
);
let personality_variables = ModelInstructionsVariables {
personality_default: None,
@@ -649,17 +634,16 @@ mod tests {
personality_pragmatic: Some("pragmatic".to_string()),
};
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Friendly)),
personality_variables.get_personality_message(Personality::Friendly),
Some("friendly".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
personality_variables.get_personality_message(Personality::Pragmatic),
Some("pragmatic".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
personality_variables.get_personality_message(Personality::None),
Some(String::new())
);
assert_eq!(personality_variables.get_personality_message(None), None);
}
}

View File

@@ -2414,7 +2414,7 @@ impl App {
}
fn on_update_personality(&mut self, personality: Personality) {
self.config.personality = Some(personality);
self.config.personality = personality;
self.chat_widget.set_personality(personality);
}

View File

@@ -466,9 +466,6 @@ impl ChatComposer {
self.collaboration_mode_indicator = indicator;
}
pub fn set_personality_command_enabled(&mut self, enabled: bool) {
self.personality_command_enabled = enabled;
}
/// Centralized feature gating keeps config checks out of call sites.
fn popups_enabled(&self) -> bool {
self.config.popups_enabled

View File

@@ -277,11 +277,6 @@ impl BottomPane {
self.request_redraw();
}
pub fn set_personality_command_enabled(&mut self, enabled: bool) {
self.composer.set_personality_command_enabled(enabled);
self.request_redraw();
}
pub fn status_widget(&self) -> Option<&StatusIndicatorWidget> {
self.status.as_ref()
}

View File

@@ -1030,7 +1030,6 @@ impl ChatWidget {
None,
);
self.refresh_model_display();
self.sync_personality_command_enabled();
let session_info_cell = history_cell::new_session_info(
&self.config,
&model_for_header,
@@ -2680,7 +2679,6 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
#[cfg(target_os = "windows")]
widget.bottom_pane.set_windows_degraded_sandbox_active(
codex_core::windows_sandbox::ELEVATED_SANDBOX_NUX_ENABLED
@@ -2844,7 +2842,6 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
widget
}
@@ -2997,7 +2994,6 @@ impl ChatWidget {
widget.bottom_pane.set_collaboration_modes_enabled(
widget.config.features.enabled(Feature::CollaborationModes),
);
widget.sync_personality_command_enabled();
#[cfg(target_os = "windows")]
widget.bottom_pane.set_windows_degraded_sandbox_active(
codex_core::windows_sandbox::ELEVATED_SANDBOX_NUX_ENABLED
@@ -3776,11 +3772,7 @@ impl ChatWidget {
} else {
None
};
let personality = self
.config
.personality
.filter(|_| self.config.features.enabled(Feature::Personality))
.filter(|_| self.current_model_supports_personality());
let personality = self.config.personality;
let op = Op::UserTurn {
items,
cwd: self.config.cwd.clone(),
@@ -3791,7 +3783,7 @@ impl ChatWidget {
summary: self.config.model_reasoning_summary,
final_output_json_schema: None,
collaboration_mode,
personality,
personality: Some(personality),
};
self.codex_op_tx.send(op).unwrap_or_else(|e| {
@@ -4727,7 +4719,7 @@ impl ChatWidget {
}
fn open_personality_popup_for_current_model(&mut self) {
let current_personality = self.config.personality.unwrap_or(Personality::Friendly);
let current_personality = self.config.personality;
let personalities = [Personality::Friendly, Personality::Pragmatic];
let supports_personality = self.current_model_supports_personality();
@@ -5975,9 +5967,6 @@ impl ChatWidget {
self.refresh_model_display();
self.request_redraw();
}
if feature == Feature::Personality {
self.sync_personality_command_enabled();
}
#[cfg(target_os = "windows")]
if matches!(
feature,
@@ -6030,7 +6019,7 @@ impl ChatWidget {
/// Set the personality in the widget's config copy.
pub(crate) fn set_personality(&mut self, personality: Personality) {
self.config.personality = Some(personality);
self.config.personality = personality;
}
/// Set the model in the widget's config copy and stored collaboration mode.
@@ -6056,11 +6045,6 @@ impl ChatWidget {
.unwrap_or_else(|| self.current_collaboration_mode.model())
}
fn sync_personality_command_enabled(&mut self) {
self.bottom_pane
.set_personality_command_enabled(self.config.features.enabled(Feature::Personality));
}
fn current_model_supports_personality(&self) -> bool {
let model = self.current_model();
self.models_manager

View File

@@ -3181,7 +3181,6 @@ async fn collab_mode_toggle_on_applies_default_preset() {
#[tokio::test]
async fn user_turn_includes_personality_from_config() {
let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(Some("bengalfox")).await;
chat.set_feature_enabled(Feature::Personality, true);
chat.thread_id = Some(ThreadId::new());
chat.set_model("bengalfox");
chat.set_personality(Personality::Friendly);