feat(app-server): Expose personality (#9674)

### Motivation
Exposes a per-thread / per-turn `personality` override in the v2
app-server API so clients can influence the model's communication style
at thread or turn start. The override is passed into session
configuration resolution so it takes effect for subsequent turns and
for headless runners.
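
As a quick sketch of the client-facing surface (this mirrors the new integration test below and assumes the workspace's `codex_app_server_protocol` and `codex_protocol` crates):

```rust
use codex_app_server_protocol::TurnStartParams;
use codex_protocol::config_types::Personality;

// Hedged sketch: build a `turn/start` request that overrides personality
// for this turn and subsequent turns, leaving other fields at defaults.
fn friendly_turn(thread_id: String) -> TurnStartParams {
    TurnStartParams {
        thread_id,
        personality: Some(Personality::Friendly),
        ..Default::default()
    }
}
```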

### Testing
- [x] Add an integration-style test
`turn_start_accepts_personality_override_v2` in
`codex-rs/app-server/tests/suite/v2/turn_start.rs` that verifies that a
`personality` override results in a developer update message containing
`<personality_spec>` in the outbound model request.

------
[Codex Task](https://chatgpt.com/codex/tasks/task_i_6971d646b1c08322a689a54d2649f3fe)
Author: Dylan Hurd
Date: 2026-01-22 18:00:20 -08:00 (committed by GitHub)
Parent: a2c829a808
Commit: 2b1ee24e11
7 changed files with 296 additions and 26 deletions


@@ -6,6 +6,7 @@ use codex_protocol::account::PlanType;
use codex_protocol::approvals::ExecPolicyAmendment as CoreExecPolicyAmendment;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::ReasoningSummary;
use codex_protocol::config_types::SandboxMode as CoreSandboxMode;
use codex_protocol::config_types::Verbosity;
@@ -1079,6 +1080,7 @@ pub struct ThreadStartParams {
pub config: Option<HashMap<String, JsonValue>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
/// If true, opt into emitting raw response items on the event stream.
///
/// This is for internal use only (e.g. Codex Cloud).
@@ -1133,6 +1135,7 @@ pub struct ThreadResumeParams {
pub config: Option<HashMap<String, serde_json::Value>>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub personality: Option<Personality>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
@@ -1608,6 +1611,8 @@ pub struct TurnStartParams {
pub effort: Option<ReasoningEffort>,
/// Override the reasoning summary for this turn and subsequent turns.
pub summary: Option<ReasoningSummary>,
/// Override the personality for this turn and subsequent turns.
pub personality: Option<Personality>,
/// Optional JSON Schema used to constrain the final assistant message for this turn.
pub output_schema: Option<JsonValue>,


@@ -114,6 +114,7 @@ Start a fresh thread when you need a new Codex conversation.
"cwd": "/Users/me/project",
"approvalPolicy": "never",
"sandbox": "workspaceWrite",
"personality": "friendly"
} }
{ "id": 10, "result": {
"thread": {
@@ -126,10 +127,13 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { } } }
```
-To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted:
+To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
```json
{ "method": "thread/resume", "id": 11, "params": { "threadId": "thr_123" } }
{ "method": "thread/resume", "id": 11, "params": {
"threadId": "thr_123",
"personality": "friendly"
} }
{ "id": 11, "result": { "thread": { "id": "thr_123", } } }
```
@@ -231,6 +235,7 @@ You can optionally specify config overrides on the new turn. If specified, these
"model": "gpt-5.1-codex",
"effort": "medium",
"summary": "concise",
"personality": "friendly",
// Optional JSON Schema to constrain the final assistant message for this turn.
"outputSchema": {
"type": "object",


@@ -169,6 +169,7 @@ use codex_login::ShutdownHandle;
use codex_login::run_login_server;
use codex_protocol::ThreadId;
use codex_protocol::config_types::ForcedLoginMethod;
use codex_protocol::config_types::Personality;
use codex_protocol::items::TurnItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::GitInfo as CoreGitInfo;
@@ -1405,6 +1406,7 @@ impl CodexMessageProcessor {
params.sandbox,
params.base_instructions,
params.developer_instructions,
params.personality,
);
let config =
@@ -1518,6 +1520,7 @@ impl CodexMessageProcessor {
sandbox: Option<SandboxMode>,
base_instructions: Option<String>,
developer_instructions: Option<String>,
personality: Option<Personality>,
) -> ConfigOverrides {
ConfigOverrides {
model,
@@ -1529,6 +1532,7 @@ impl CodexMessageProcessor {
codex_linux_sandbox_exe: self.codex_linux_sandbox_exe.clone(),
base_instructions,
developer_instructions,
model_personality: personality,
..Default::default()
}
}
@@ -1836,6 +1840,7 @@ impl CodexMessageProcessor {
config: request_overrides,
base_instructions,
developer_instructions,
personality,
} = params;
let thread_history = if let Some(history) = history {
@@ -1921,6 +1926,7 @@ impl CodexMessageProcessor {
sandbox,
base_instructions,
developer_instructions,
personality,
);
// Derive a Config using the same logic as new conversation, honoring overrides if provided.
@@ -2105,6 +2111,7 @@ impl CodexMessageProcessor {
sandbox,
base_instructions,
developer_instructions,
None,
);
// Derive a Config using the same logic as new conversation, honoring overrides if provided.
let config = match derive_config_for_cwd(
@@ -3615,7 +3622,8 @@ impl CodexMessageProcessor {
|| params.model.is_some()
|| params.effort.is_some()
|| params.summary.is_some()
-|| params.collaboration_mode.is_some();
+|| params.collaboration_mode.is_some()
+|| params.personality.is_some();
// If any overrides are provided, update the session turn context first.
if has_any_overrides {
@@ -3628,7 +3636,7 @@ impl CodexMessageProcessor {
effort: params.effort.map(Some),
summary: params.summary,
collaboration_mode: params.collaboration_mode,
-personality: None,
+personality: params.personality,
})
.await;
}
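
One detail in the hunk above: `effort: params.effort.map(Some)` implies the update struct uses a nested `Option` for effort (so "leave unchanged" and "explicitly clear" stay distinct), whereas `personality: params.personality` forwards a plain `Option`, so an absent personality simply leaves the session value alone. A self-contained sketch of the two patterns, with simplified stand-in types (the real structs live in the codex crates):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Personality { Friendly }
#[derive(Debug, Clone, Copy, PartialEq)]
enum ReasoningEffort { Medium }

struct SettingsUpdate {
    // Nested Option: None = leave unchanged, Some(None) = clear, Some(Some(_)) = set.
    effort: Option<Option<ReasoningEffort>>,
    // Plain Option: None = leave unchanged, Some(_) = set.
    personality: Option<Personality>,
}

fn main() {
    // Mirrors the call site above: client-supplied turn params are forwarded.
    let param_effort = Some(ReasoningEffort::Medium);
    let param_personality = Some(Personality::Friendly);
    let update = SettingsUpdate {
        effort: param_effort.map(Some),
        personality: param_personality,
    };
    assert_eq!(update.effort, Some(Some(ReasoningEffort::Medium)));
    assert_eq!(update.personality, Some(Personality::Friendly));
}
```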


@@ -11,18 +11,23 @@ use codex_app_server_protocol::ThreadResumeParams;
use codex_app_server_protocol::ThreadResumeResponse;
use codex_app_server_protocol::ThreadStartParams;
use codex_app_server_protocol::ThreadStartResponse;
use codex_app_server_protocol::TurnStartParams;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput;
use codex_protocol::config_types::Personality;
use codex_protocol::models::ContentItem;
use codex_protocol::models::ResponseItem;
use codex_protocol::user_input::ByteRange;
use codex_protocol::user_input::TextElement;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;
const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const DEFAULT_BASE_INSTRUCTIONS: &str = "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
#[tokio::test]
async fn thread_resume_returns_original_thread() -> Result<()> {
@@ -248,6 +253,91 @@ async fn thread_resume_supports_history_and_overrides() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn thread_resume_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(codex_home.path(), &server.uri())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let start_id = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let start_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(start_id)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(start_resp)?;
let resume_id = mcp
.send_thread_resume_request(ThreadResumeParams {
thread_id: thread.id.clone(),
model: Some("gpt-5.2-codex".to_string()),
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let resume_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(resume_id)),
)
.await??;
let _resume: ThreadResumeResponse = to_response::<ThreadResumeResponse>(resume_resp)?;
let turn_id = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id,
input: vec![UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
..Default::default()
})
.await?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_id)),
)
.await??;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
assert!(
!developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"did not expect a personality update message in developer input, got {developer_texts:?}"
);
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(DEFAULT_BASE_INSTRUCTIONS),
"expected default base instructions from history, got {instructions_text:?}"
);
Ok(())
}
// Helper to create a config.toml pointing at the mock model server.
fn create_config_toml(codex_home: &std::path::Path, server_uri: &str) -> std::io::Result<()> {
let config_toml = codex_home.join("config.toml");
@@ -261,6 +351,9 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
remote_models = false
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"


@@ -34,13 +34,17 @@ use codex_app_server_protocol::TurnStartResponse;
use codex_app_server_protocol::TurnStartedNotification;
use codex_app_server_protocol::TurnStatus;
use codex_app_server_protocol::UserInput as V2UserInput;
use codex_core::features::FEATURES;
use codex_core::features::Feature;
use codex_core::protocol_config_types::ReasoningSummary;
use codex_protocol::config_types::CollaborationMode;
use codex_protocol::config_types::Personality;
use codex_protocol::config_types::Settings;
use codex_protocol::openai_models::ReasoningEffort;
use core_test_support::responses;
use core_test_support::skip_if_no_network;
use pretty_assertions::assert_eq;
use std::collections::BTreeMap;
use std::path::Path;
use tempfile::TempDir;
use tokio::time::timeout;
@@ -54,7 +58,12 @@ async fn turn_start_sends_originator_header() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(
@@ -124,7 +133,12 @@ async fn turn_start_emits_user_message_item_with_text_elements() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -211,7 +225,12 @@ async fn turn_start_emits_notifications_and_accepts_model_override() -> Result<(
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -321,13 +340,19 @@ async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
@@ -379,6 +404,81 @@ async fn turn_start_accepts_collaboration_mode_override_v2() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_personality_override_v2() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = responses::start_mock_server().await;
let body = responses::sse(vec![
responses::ev_response_created("resp-1"),
responses::ev_assistant_message("msg-1", "Done"),
responses::ev_completed("resp-1"),
]);
let response_mock = responses::mount_sse_once(&server, body).await;
let codex_home = TempDir::new()?;
create_config_toml(
codex_home.path(),
&server.uri(),
"never",
&BTreeMap::default(),
)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
let thread_req = mcp
.send_thread_start_request(ThreadStartParams {
model: Some("gpt-5.2-codex".to_string()),
..Default::default()
})
.await?;
let thread_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(thread_req)),
)
.await??;
let ThreadStartResponse { thread, .. } = to_response::<ThreadStartResponse>(thread_resp)?;
let turn_req = mcp
.send_turn_start_request(TurnStartParams {
thread_id: thread.id.clone(),
input: vec![V2UserInput::Text {
text: "Hello".to_string(),
text_elements: Vec::new(),
}],
personality: Some(Personality::Friendly),
..Default::default()
})
.await?;
let turn_resp: JSONRPCResponse = timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(turn_req)),
)
.await??;
let _turn: TurnStartResponse = to_response::<TurnStartResponse>(turn_resp)?;
timeout(
DEFAULT_READ_TIMEOUT,
mcp.read_stream_until_notification_message("turn/completed"),
)
.await??;
let request = response_mock.single_request();
let developer_texts = request.message_input_texts("developer");
if developer_texts.is_empty() {
eprintln!("request body: {}", request.body_json());
}
assert!(
developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"expected personality update message in developer input, got {developer_texts:?}"
);
Ok(())
}
#[tokio::test]
async fn turn_start_accepts_local_image_input() -> Result<()> {
// Two Codex turns hit the mock model (session start + turn/start).
@@ -391,7 +491,12 @@ async fn turn_start_accepts_local_image_input() -> Result<()> {
let server = create_mock_responses_server_sequence_unchecked(responses).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -466,7 +571,12 @@ async fn turn_start_exec_approval_toggle_v2() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
// Default approval is untrusted to force elicitation on first turn.
-create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
+create_config_toml(
+codex_home.as_path(),
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -591,7 +701,12 @@ async fn turn_start_exec_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("done")?,
];
let server = create_mock_responses_server_sequence(responses).await;
-create_config_toml(codex_home.as_path(), &server.uri(), "untrusted")?;
+create_config_toml(
+codex_home.as_path(),
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(codex_home.as_path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -738,7 +853,12 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
create_final_assistant_message_sse_response("done second")?,
];
let server = create_mock_responses_server_sequence(responses).await;
-create_config_toml(&codex_home, &server.uri(), "untrusted")?;
+create_config_toml(
+&codex_home,
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -776,6 +896,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
@@ -806,6 +927,7 @@ async fn turn_start_updates_sandbox_and_cwd_between_turns_v2() -> Result<()> {
model: Some("mock-model".to_string()),
effort: Some(ReasoningEffort::Medium),
summary: Some(ReasoningSummary::Auto),
personality: None,
output_schema: None,
collaboration_mode: None,
})
@@ -876,7 +998,12 @@ async fn turn_start_file_change_approval_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
-create_config_toml(&codex_home, &server.uri(), "untrusted")?;
+create_config_toml(
+&codex_home,
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1053,7 +1180,12 @@ async fn turn_start_file_change_approval_accept_for_session_persists_v2() -> Res
create_final_assistant_message_sse_response("patch 2 applied")?,
];
let server = create_mock_responses_server_sequence(responses).await;
-create_config_toml(&codex_home, &server.uri(), "untrusted")?;
+create_config_toml(
+&codex_home,
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1229,7 +1361,12 @@ async fn turn_start_file_change_approval_decline_v2() -> Result<()> {
create_final_assistant_message_sse_response("patch declined")?,
];
let server = create_mock_responses_server_sequence(responses).await;
-create_config_toml(&codex_home, &server.uri(), "untrusted")?;
+create_config_toml(
+&codex_home,
+&server.uri(),
+"untrusted",
+&BTreeMap::default(),
+)?;
let mut mcp = McpProcess::new(&codex_home).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1369,16 +1506,12 @@ async fn command_execution_notifications_include_process_id() -> Result<()> {
];
let server = create_mock_responses_server_sequence(responses).await;
let codex_home = TempDir::new()?;
-create_config_toml(codex_home.path(), &server.uri(), "never")?;
-let config_toml = codex_home.path().join("config.toml");
-let mut config_contents = std::fs::read_to_string(&config_toml)?;
-config_contents.push_str(
-r#"
-[features]
-unified_exec = true
-"#,
-);
-std::fs::write(&config_toml, config_contents)?;
+create_config_toml(
+codex_home.path(),
+&server.uri(),
+"never",
+&BTreeMap::from([(Feature::UnifiedExec, true)]),
+)?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;
@@ -1502,7 +1635,24 @@ fn create_config_toml(
codex_home: &Path,
server_uri: &str,
approval_policy: &str,
feature_flags: &BTreeMap<Feature, bool>,
) -> std::io::Result<()> {
let mut features = BTreeMap::from([(Feature::RemoteModels, false)]);
for (feature, enabled) in feature_flags {
features.insert(*feature, *enabled);
}
let feature_entries = features
.into_iter()
.map(|(feature, enabled)| {
let key = FEATURES
.iter()
.find(|spec| spec.id == feature)
.map(|spec| spec.key)
.unwrap_or_else(|| panic!("missing feature key for {feature:?}"));
format!("{key} = {enabled}")
})
.collect::<Vec<_>>()
.join("\n");
let config_toml = codex_home.join("config.toml");
std::fs::write(
config_toml,
@@ -1514,6 +1664,9 @@ sandbox_mode = "read-only"
model_provider = "mock_provider"
[features]
{feature_entries}
[model_providers.mock_provider]
name = "Mock provider for test"
base_url = "{server_uri}/v1"


@@ -1141,6 +1141,7 @@ pub struct ConfigOverrides {
pub codex_linux_sandbox_exe: Option<PathBuf>,
pub base_instructions: Option<String>,
pub developer_instructions: Option<String>,
pub model_personality: Option<Personality>,
pub compact_prompt: Option<String>,
pub include_apply_patch_tool: Option<bool>,
pub show_raw_agent_reasoning: Option<bool>,
@@ -1228,6 +1229,7 @@ impl Config {
codex_linux_sandbox_exe,
base_instructions,
developer_instructions,
model_personality,
compact_prompt,
include_apply_patch_tool: include_apply_patch_tool_override,
show_raw_agent_reasoning,
@@ -1433,6 +1435,9 @@ impl Config {
Self::try_read_non_empty_file(model_instructions_path, "model instructions file")?;
let base_instructions = base_instructions.or(file_base_instructions);
let developer_instructions = developer_instructions.or(cfg.developer_instructions);
let model_personality = model_personality
.or(config_profile.model_personality)
.or(cfg.model_personality);
let experimental_compact_prompt_path = config_profile
.experimental_compact_prompt_file
@@ -1482,7 +1487,7 @@ impl Config {
notify: cfg.notify,
user_instructions,
base_instructions,
-model_personality: config_profile.model_personality.or(cfg.model_personality),
+model_personality,
developer_instructions,
compact_prompt,
// The config.toml omits "_mode" because it's a config file. However, "_mode"
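
The resolution above gives a per-request override precedence over the active profile, which in turn wins over the root config value. A minimal runnable sketch of that chain, using a simplified stand-in `Personality` (the `Concise` variant here is hypothetical; the real resolution happens during `Config` construction as shown in the hunk):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Personality { Friendly, Concise } // stand-in; real enum lives in codex_protocol

// Mirrors `model_personality.or(config_profile.model_personality).or(cfg.model_personality)`.
fn resolve(
    override_: Option<Personality>, // from ThreadStartParams / TurnStartParams
    profile: Option<Personality>,   // config_profile.model_personality
    root: Option<Personality>,      // cfg.model_personality in config.toml
) -> Option<Personality> {
    override_.or(profile).or(root)
}

fn main() {
    // An explicit override beats both profile and root config.
    assert_eq!(
        resolve(Some(Personality::Friendly), Some(Personality::Concise), None),
        Some(Personality::Friendly)
    );
    // Without an override, the profile value wins over the root value.
    assert_eq!(
        resolve(None, Some(Personality::Concise), Some(Personality::Friendly)),
        Some(Personality::Concise)
    );
}
```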


@@ -211,6 +211,7 @@ pub async fn run_main(cli: Cli, codex_linux_sandbox_exe: Option<PathBuf>) -> any
codex_linux_sandbox_exe,
base_instructions: None,
developer_instructions: None,
model_personality: None,
compact_prompt: None,
include_apply_patch_tool: None,
show_raw_agent_reasoning: oss.then_some(true),