Compare commits

...

11 Commits

Author SHA1 Message Date
Charles Cunningham
c94070d59e Match from_policy signature on main 2026-02-16 22:46:01 -08:00
Charles Cunningham
37ae399a74 Drop settings update params wrapper 2026-02-16 22:27:37 -08:00
Charles Cunningham
05f987a6c0 Move to context manager 2026-02-16 19:12:57 -08:00
Charles Cunningham
63133fbd83 Add TODO for pure persisted context diffing 2026-02-16 19:12:57 -08:00
Charles Cunningham
8d71af2409 Centralize context update diffing logic 2026-02-16 19:12:57 -08:00
Eric Traut
08f689843f Fixed screen reader regression in CLI (#11860)
The `tui.animations` switch should gate all animations in the TUI, but a
recent change added an animation without that gate, which makes the TUI
difficult to use with a screen reader.

This fix addresses #11856
2026-02-16 18:17:52 -08:00
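
A minimal sketch of the gating this fix restores, using illustrative stand-in types (`FrameRequester`, `StatusIndicator`) rather than the real TUI widgets: when `tui.animations` is off, the status widget simply stops scheduling follow-up frames, matching the `StatusIndicatorWidget` hunk at the end of this compare.

```rust
use std::time::Duration;

// Hypothetical stand-in for the TUI's frame scheduler; the real widget owns a
// `frame_requester` with a `schedule_frame_in` method.
struct FrameRequester;

impl FrameRequester {
    fn schedule_frame_in(&self, delay: Duration) {
        // In the real TUI this queues a redraw; here we only log the request.
        println!("next frame in {delay:?}");
    }
}

struct StatusIndicator {
    animations_enabled: bool, // driven by the `tui.animations` setting
    frame_requester: FrameRequester,
}

impl StatusIndicator {
    fn render(&self) {
        // Only keep the animation loop alive when animations are enabled, so a
        // screen-reader user with `tui.animations = false` is not hit by a
        // constant stream of redraws.
        if self.animations_enabled {
            self.frame_requester
                .schedule_frame_in(Duration::from_millis(32));
        }
        // Static status text would still be rendered either way.
    }
}

fn main() {
    let widget = StatusIndicator {
        animations_enabled: false,
        frame_requester: FrameRequester,
    };
    widget.render(); // no follow-up frame is scheduled, so the spinner stays still
}
```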
Fouad Matin
b37555dd75 add(feedback): over-refusal / safety check (#11948)
Add new feedback option for "Over-refusal / safety check"
2026-02-16 16:24:47 -08:00
Dylan Hurd
19afbc35c1 chore(core) rm Feature::RequestRule (#11866)
## Summary
This feature is now reasonably stable; let's remove it so we can
simplify our upcoming iterations here.

## Testing 
- [x] Existing tests pass
2026-02-16 22:30:23 +00:00
Matthew Zeng
5b421bba34 [apps] Fix app mention syntax. (#11894)
- [x] Fix app mention syntax.
2026-02-16 22:01:49 +00:00
jif-oai
beb5cb4f48 Rename collab modules to multi agents (#11939)
Summary
- rename the `collab` handlers and UI files to `multi_agents` to match
the new naming
- update module references and specs so the handlers and TUI widgets
consistently use the renamed files
- keep the existing functionality while aligning file and module names
with the multi-agent terminology
2026-02-16 19:05:13 +00:00
jif-oai
af434b4f71 feat: drop MCP managing tools if no MCP servers (#11900)
Drop the MCP managing tools when no MCP servers are configured, to save context.

Addresses https://github.com/openai/codex/issues/11049
2026-02-16 18:40:45 +00:00
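
A minimal sketch of the gating introduced here, with illustrative types (the real change reads `has_servers()` from `McpConnectionManager` inside `built_tools` and passes `None` to the tool registry when no servers are configured, as the hunks below show):

```rust
use std::collections::HashMap;

// Hypothetical stand-in for McpConnectionManager: one client entry per
// configured MCP server.
struct ConnectionManager {
    clients: HashMap<String, ()>,
}

impl ConnectionManager {
    fn has_servers(&self) -> bool {
        !self.clients.is_empty()
    }

    fn list_all_tools(&self) -> HashMap<String, String> {
        // Pretend each server contributes a single tool spec.
        self.clients
            .keys()
            .map(|name| (format!("{name}.tool"), "spec".to_string()))
            .collect()
    }
}

// Only hand MCP tools to the registry when at least one server exists. `None`
// means the MCP managing tools (list/read resources, etc.) are omitted
// entirely, which is what saves prompt context.
fn mcp_tools_for_registry(manager: &ConnectionManager) -> Option<HashMap<String, String>> {
    manager.has_servers().then(|| manager.list_all_tools())
}

fn main() {
    let empty = ConnectionManager { clients: HashMap::new() };
    assert!(mcp_tools_for_registry(&empty).is_none()); // no servers -> no MCP tools in the prompt

    let mut clients = HashMap::new();
    clients.insert("docs".to_string(), ());
    let configured = ConnectionManager { clients };
    assert!(mcp_tools_for_registry(&configured).is_some()); // servers present -> tools exposed
}
```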
28 changed files with 384 additions and 380 deletions

View File

@@ -477,7 +477,6 @@ fn assert_permissions_message(item: &ResponseItem) {
&SandboxPolicy::DangerFullAccess,
AskForApproval::Never,
&Policy::empty(),
false,
&PathBuf::from("/tmp"),
)
.into_text();

View File

@@ -2,6 +2,6 @@ use crate::mcp::CODEX_APPS_MCP_SERVER_NAME;
pub(crate) fn render_apps_section() -> String {
format!(
"## Apps\nApps are mentioned in the prompt in the format `[$app-name](apps://{{connector_id}})`.\nAn app is equivalent to a set of MCP tools within the `{CODEX_APPS_MCP_SERVER_NAME}` MCP.\nWhen you see an app mention, the app's MCP tools are either already provided in `{CODEX_APPS_MCP_SERVER_NAME}`, or do not exist because the user did not install it.\nDo not additionally call list_mcp_resources for apps that are already mentioned."
"## Apps\nApps are mentioned in the prompt in the format `[$app-name](app://{{connector_id}})`.\nAn app is equivalent to a set of MCP tools within the `{CODEX_APPS_MCP_SERVER_NAME}` MCP.\nWhen you see an app mention, the app's MCP tools are either already provided in `{CODEX_APPS_MCP_SERVER_NAME}`, or do not exist because the user did not install it.\nDo not additionally call list_mcp_resources for apps that are already mentioned."
)
}

View File

@@ -1967,120 +1967,6 @@ impl Session {
state.session_configuration.collaboration_mode.clone()
}
fn build_environment_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
let prev = previous?;
let shell = self.user_shell();
let prev_context = EnvironmentContext::from_turn_context(prev.as_ref(), shell.as_ref());
let next_context = EnvironmentContext::from_turn_context(next, shell.as_ref());
if prev_context.equals_except_shell(&next_context) {
return None;
}
Some(ResponseItem::from(EnvironmentContext::diff(
prev.as_ref(),
next,
shell.as_ref(),
)))
}
fn build_permissions_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
let prev = previous?;
if prev.sandbox_policy == next.sandbox_policy
&& prev.approval_policy == next.approval_policy
{
return None;
}
Some(
DeveloperInstructions::from_policy(
&next.sandbox_policy,
next.approval_policy,
self.services.exec_policy.current().as_ref(),
self.features.enabled(Feature::RequestRule),
&next.cwd,
)
.into(),
)
}
fn build_personality_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
if !self.features.enabled(Feature::Personality) {
return None;
}
let previous = previous?;
if next.model_info.slug != previous.model_info.slug {
return None;
}
// if a personality is specified and it's different from the previous one, build a personality update item
if let Some(personality) = next.personality
&& next.personality != previous.personality
{
let model_info = &next.model_info;
let personality_message = Self::personality_message_for(model_info, personality);
personality_message.map(|personality_message| {
DeveloperInstructions::personality_spec_message(personality_message).into()
})
} else {
None
}
}
fn personality_message_for(model_info: &ModelInfo, personality: Personality) -> Option<String> {
model_info
.model_messages
.as_ref()
.and_then(|spec| spec.get_personality_message(Some(personality)))
.filter(|message| !message.is_empty())
}
fn build_collaboration_mode_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
next: &TurnContext,
) -> Option<ResponseItem> {
let prev = previous?;
if prev.collaboration_mode != next.collaboration_mode {
// If the next mode has empty developer instructions, this returns None and we emit no
// update, so prior collaboration instructions remain in the prompt history.
Some(DeveloperInstructions::from_collaboration_mode(&next.collaboration_mode)?.into())
} else {
None
}
}
fn build_model_instructions_update_item(
&self,
previous: Option<&Arc<TurnContext>>,
resumed_model: Option<&str>,
next: &TurnContext,
) -> Option<ResponseItem> {
let previous_model =
resumed_model.or_else(|| previous.map(|prev| prev.model_info.slug.as_str()))?;
if previous_model == next.model_info.slug {
return None;
}
let model_instructions = next.model_info.get_model_instructions(next.personality);
if model_instructions.is_empty() {
return None;
}
Some(DeveloperInstructions::model_switch_message(model_instructions).into())
}
pub(crate) fn is_model_switch_developer_message(item: &ResponseItem) -> bool {
let ResponseItem::Message { role, content, .. } = item else {
return false;
@@ -2093,42 +1979,26 @@ impl Session {
)
})
}
fn build_settings_update_items(
&self,
previous_context: Option<&Arc<TurnContext>>,
resumed_model: Option<&str>,
current_context: &TurnContext,
) -> Vec<ResponseItem> {
let mut update_items = Vec::new();
if let Some(env_item) =
self.build_environment_update_item(previous_context, current_context)
{
update_items.push(env_item);
}
if let Some(permissions_item) =
self.build_permissions_update_item(previous_context, current_context)
{
update_items.push(permissions_item);
}
if let Some(collaboration_mode_item) =
self.build_collaboration_mode_update_item(previous_context, current_context)
{
update_items.push(collaboration_mode_item);
}
if let Some(model_instructions_item) = self.build_model_instructions_update_item(
previous_context,
// TODO: Make context updates a pure diff of persisted previous/current TurnContextItem
// state so replay/backtracking is deterministic. Runtime inputs that affect model-visible
// context (shell, exec policy, feature gates, resumed model bridge) should be persisted
// state or explicit non-state replay events.
let shell = self.user_shell();
let exec_policy = self.services.exec_policy.current();
crate::context_manager::updates::build_settings_update_items(
previous_context.map(Arc::as_ref),
resumed_model,
current_context,
) {
update_items.push(model_instructions_item);
}
if let Some(personality_item) =
self.build_personality_update_item(previous_context, current_context)
{
update_items.push(personality_item);
}
update_items
shell.as_ref(),
exec_policy.as_ref(),
self.features.enabled(Feature::Personality),
)
}
/// Persist the event to rollout and send it to clients.
@@ -2624,7 +2494,6 @@ impl Session {
&turn_context.sandbox_policy,
turn_context.approval_policy,
self.services.exec_policy.current().as_ref(),
self.features.enabled(Feature::RequestRule),
&turn_context.cwd,
)
.into(),
@@ -2660,7 +2529,10 @@ impl Session {
&& base_instructions == model_info.get_model_instructions(Some(personality));
if !has_baked_personality
&& let Some(personality_message) =
Self::personality_message_for(&model_info, personality)
crate::context_manager::updates::personality_message_for(
&model_info,
personality,
)
{
items.push(
DeveloperInstructions::personality_spec_message(personality_message).into(),
@@ -4997,14 +4869,13 @@ async fn built_tools(
skills_outcome: Option<&SkillLoadOutcome>,
cancellation_token: &CancellationToken,
) -> CodexResult<Arc<ToolRouter>> {
let mut mcp_tools = sess
.services
.mcp_connection_manager
.read()
.await
let mcp_connection_manager = sess.services.mcp_connection_manager.read().await;
let has_mcp_servers = mcp_connection_manager.has_servers();
let mut mcp_tools = mcp_connection_manager
.list_all_tools()
.or_cancel(cancellation_token)
.await?;
drop(mcp_connection_manager);
let mut effective_explicitly_enabled_connectors = explicitly_enabled_connectors.clone();
effective_explicitly_enabled_connectors.extend(sess.get_connector_selection().await);
@@ -5050,12 +4921,12 @@ async fn built_tools(
Ok(Arc::new(ToolRouter::from_config(
&turn_context.tools_config,
Some(
has_mcp_servers.then(|| {
mcp_tools
.into_iter()
.map(|(name, tool)| (name, tool.tool))
.collect(),
),
.collect()
}),
app_tools,
turn_context.dynamic_tools.as_slice(),
)))

View File

@@ -1,5 +1,6 @@
mod history;
mod normalize;
pub(crate) mod updates;
pub(crate) use history::ContextManager;
pub(crate) use history::TotalTokenUsageBreakdown;

View File

@@ -0,0 +1,148 @@
use crate::codex::TurnContext;
use crate::environment_context::EnvironmentContext;
use crate::shell::Shell;
use codex_execpolicy::Policy;
use codex_protocol::config_types::Personality;
use codex_protocol::models::DeveloperInstructions;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::ModelInfo;
fn build_environment_update_item(
previous: Option<&TurnContext>,
next: &TurnContext,
shell: &Shell,
) -> Option<ResponseItem> {
let prev = previous?;
let prev_context = EnvironmentContext::from_turn_context(prev, shell);
let next_context = EnvironmentContext::from_turn_context(next, shell);
if prev_context.equals_except_shell(&next_context) {
return None;
}
Some(ResponseItem::from(EnvironmentContext::diff(
prev, next, shell,
)))
}
fn build_permissions_update_item(
previous: Option<&TurnContext>,
next: &TurnContext,
exec_policy: &Policy,
) -> Option<ResponseItem> {
let prev = previous?;
if prev.sandbox_policy == next.sandbox_policy && prev.approval_policy == next.approval_policy {
return None;
}
Some(
DeveloperInstructions::from_policy(
&next.sandbox_policy,
next.approval_policy,
exec_policy,
&next.cwd,
)
.into(),
)
}
fn build_collaboration_mode_update_item(
previous: Option<&TurnContext>,
next: &TurnContext,
) -> Option<ResponseItem> {
let prev = previous?;
if prev.collaboration_mode != next.collaboration_mode {
// If the next mode has empty developer instructions, this returns None and we emit no
// update, so prior collaboration instructions remain in the prompt history.
Some(DeveloperInstructions::from_collaboration_mode(&next.collaboration_mode)?.into())
} else {
None
}
}
fn build_personality_update_item(
previous: Option<&TurnContext>,
next: &TurnContext,
personality_feature_enabled: bool,
) -> Option<ResponseItem> {
if !personality_feature_enabled {
return None;
}
let previous = previous?;
if next.model_info.slug != previous.model_info.slug {
return None;
}
if let Some(personality) = next.personality
&& next.personality != previous.personality
{
let model_info = &next.model_info;
let personality_message = personality_message_for(model_info, personality);
personality_message
.map(|message| DeveloperInstructions::personality_spec_message(message).into())
} else {
None
}
}
pub(crate) fn personality_message_for(
model_info: &ModelInfo,
personality: Personality,
) -> Option<String> {
model_info
.model_messages
.as_ref()
.and_then(|spec| spec.get_personality_message(Some(personality)))
.filter(|message| !message.is_empty())
}
pub(crate) fn build_model_instructions_update_item(
previous: Option<&TurnContext>,
resumed_model: Option<&str>,
next: &TurnContext,
) -> Option<ResponseItem> {
let previous_model =
resumed_model.or_else(|| previous.map(|prev| prev.model_info.slug.as_str()))?;
if previous_model == next.model_info.slug {
return None;
}
let model_instructions = next.model_info.get_model_instructions(next.personality);
if model_instructions.is_empty() {
return None;
}
Some(DeveloperInstructions::model_switch_message(model_instructions).into())
}
pub(crate) fn build_settings_update_items(
previous: Option<&TurnContext>,
resumed_model: Option<&str>,
next: &TurnContext,
shell: &Shell,
exec_policy: &Policy,
personality_feature_enabled: bool,
) -> Vec<ResponseItem> {
let mut update_items = Vec::new();
if let Some(env_item) = build_environment_update_item(previous, next, shell) {
update_items.push(env_item);
}
if let Some(permissions_item) = build_permissions_update_item(previous, next, exec_policy) {
update_items.push(permissions_item);
}
if let Some(collaboration_mode_item) = build_collaboration_mode_update_item(previous, next) {
update_items.push(collaboration_mode_item);
}
if let Some(model_instructions_item) =
build_model_instructions_update_item(previous, resumed_model, next)
{
update_items.push(model_instructions_item);
}
if let Some(personality_item) =
build_personality_update_item(previous, next, personality_feature_enabled)
{
update_items.push(personality_item);
}
update_items
}

View File

@@ -686,8 +686,6 @@ mod tests {
use crate::config_loader::ConfigLayerStack;
use crate::config_loader::ConfigRequirements;
use crate::config_loader::ConfigRequirementsToml;
use crate::features::Feature;
use crate::features::Features;
use codex_app_server_protocol::ConfigLayerSource;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
@@ -1281,8 +1279,6 @@ prefix_rule(
"cargo-insta".to_string(),
];
let manager = ExecPolicyManager::default();
let mut features = Features::with_defaults();
features.enable(Feature::RequestRule);
let requirement = manager
.create_exec_approval_requirement_for_command(ExecApprovalRequest {

View File

@@ -511,8 +511,8 @@ pub const FEATURES: &[FeatureSpec] = &[
FeatureSpec {
id: Feature::RequestRule,
key: "request_rule",
stage: Stage::Stable,
default_enabled: true,
stage: Stage::Removed,
default_enabled: false,
},
FeatureSpec {
id: Feature::WindowsSandbox,

View File

@@ -353,6 +353,10 @@ pub(crate) struct McpConnectionManager {
}
impl McpConnectionManager {
pub(crate) fn has_servers(&self) -> bool {
!self.clients.is_empty()
}
pub async fn initialize(
&mut self,
mcp_servers: &HashMap<String, McpServerConfig>,

View File

@@ -1,11 +1,11 @@
pub mod apply_patch;
pub(crate) mod collab;
mod dynamic;
mod grep_files;
mod js_repl;
mod list_dir;
mod mcp;
mod mcp_resource;
pub(crate) mod multi_agents;
mod plan;
mod read_file;
mod request_user_input;
@@ -20,7 +20,6 @@ use serde::Deserialize;
use crate::function_tool::FunctionCallError;
pub use apply_patch::ApplyPatchHandler;
pub use collab::CollabHandler;
pub use dynamic::DynamicToolHandler;
pub use grep_files::GrepFilesHandler;
pub use js_repl::JsReplHandler;
@@ -28,6 +27,7 @@ pub use js_repl::JsReplResetHandler;
pub use list_dir::ListDirHandler;
pub use mcp::McpHandler;
pub use mcp_resource::McpResourceHandler;
pub use multi_agents::MultiAgentHandler;
pub use plan::PlanHandler;
pub use read_file::ReadFileHandler;
pub use request_user_input::RequestUserInputHandler;

View File

@@ -34,7 +34,7 @@ use codex_protocol::user_input::UserInput;
use serde::Deserialize;
use serde::Serialize;
pub struct CollabHandler;
pub struct MultiAgentHandler;
/// Minimum wait timeout to prevent tight polling loops from burning CPU.
pub(crate) const MIN_WAIT_TIMEOUT_MS: i64 = 10_000;
@@ -47,7 +47,7 @@ struct CloseAgentArgs {
}
#[async_trait]
impl ToolHandler for CollabHandler {
impl ToolHandler for MultiAgentHandler {
fn kind(&self) -> ToolKind {
ToolKind::Function
}
@@ -917,7 +917,7 @@ mod tests {
input: "hello".to_string(),
},
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("payload should be rejected");
};
assert_eq!(
@@ -937,7 +937,7 @@ mod tests {
"unknown_tool",
function_payload(json!({})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("tool should be rejected");
};
assert_eq!(
@@ -955,7 +955,7 @@ mod tests {
"spawn_agent",
function_payload(json!({"message": " "})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("empty message should be rejected");
};
assert_eq!(
@@ -978,7 +978,7 @@ mod tests {
"items": [{"type": "mention", "name": "drive", "path": "app://drive"}]
})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("message+items should be rejected");
};
assert_eq!(
@@ -1016,7 +1016,7 @@ mod tests {
"agent_type": "explorer"
})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("spawn_agent should succeed");
@@ -1048,7 +1048,7 @@ mod tests {
"spawn_agent",
function_payload(json!({"message": "hello"})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("spawn should fail without a manager");
};
assert_eq!(
@@ -1074,7 +1074,7 @@ mod tests {
"spawn_agent",
function_payload(json!({"message": "hello"})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("spawn should fail when depth limit exceeded");
};
assert_eq!(
@@ -1094,7 +1094,7 @@ mod tests {
"send_input",
function_payload(json!({"id": ThreadId::new().to_string(), "message": ""})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("empty message should be rejected");
};
assert_eq!(
@@ -1118,7 +1118,7 @@ mod tests {
"items": [{"type": "mention", "name": "drive", "path": "app://drive"}]
})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("message+items should be rejected");
};
assert_eq!(
@@ -1138,7 +1138,7 @@ mod tests {
"send_input",
function_payload(json!({"id": "not-a-uuid", "message": "hi"})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("invalid id should be rejected");
};
let FunctionCallError::RespondToModel(msg) = err else {
@@ -1159,7 +1159,7 @@ mod tests {
"send_input",
function_payload(json!({"id": agent_id.to_string(), "message": "hi"})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("missing agent should be reported");
};
assert_eq!(
@@ -1186,7 +1186,7 @@ mod tests {
"interrupt": true
})),
);
CollabHandler
MultiAgentHandler
.handle(invocation)
.await
.expect("send_input should succeed");
@@ -1227,7 +1227,7 @@ mod tests {
]
})),
);
CollabHandler
MultiAgentHandler
.handle(invocation)
.await
.expect("send_input should succeed");
@@ -1267,7 +1267,7 @@ mod tests {
"resume_agent",
function_payload(json!({"id": "not-a-uuid"})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("invalid id should be rejected");
};
let FunctionCallError::RespondToModel(msg) = err else {
@@ -1288,7 +1288,7 @@ mod tests {
"resume_agent",
function_payload(json!({"id": agent_id.to_string()})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("missing agent should be reported");
};
assert_eq!(
@@ -1313,7 +1313,7 @@ mod tests {
function_payload(json!({"id": agent_id.to_string()})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("resume_agent should succeed");
@@ -1382,7 +1382,7 @@ mod tests {
"resume_agent",
function_payload(json!({"id": agent_id.to_string()})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(resume_invocation)
.await
.expect("resume_agent should succeed");
@@ -1405,7 +1405,7 @@ mod tests {
"send_input",
function_payload(json!({"id": agent_id.to_string(), "message": "hello"})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(send_invocation)
.await
.expect("send_input should succeed after resume");
@@ -1450,7 +1450,7 @@ mod tests {
"resume_agent",
function_payload(json!({"id": ThreadId::new().to_string()})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("resume should fail when depth limit exceeded");
};
assert_eq!(
@@ -1479,7 +1479,7 @@ mod tests {
"timeout_ms": 0
})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("non-positive timeout should be rejected");
};
assert_eq!(
@@ -1497,7 +1497,7 @@ mod tests {
"wait",
function_payload(json!({"ids": ["invalid"]})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("invalid id should be rejected");
};
let FunctionCallError::RespondToModel(msg) = err else {
@@ -1515,7 +1515,7 @@ mod tests {
"wait",
function_payload(json!({"ids": []})),
);
let Err(err) = CollabHandler.handle(invocation).await else {
let Err(err) = MultiAgentHandler.handle(invocation).await else {
panic!("empty ids should be rejected");
};
assert_eq!(
@@ -1540,7 +1540,7 @@ mod tests {
"timeout_ms": 1000
})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("wait should succeed");
@@ -1584,7 +1584,7 @@ mod tests {
"timeout_ms": MIN_WAIT_TIMEOUT_MS
})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("wait should succeed");
@@ -1632,7 +1632,11 @@ mod tests {
})),
);
let early = timeout(Duration::from_millis(50), CollabHandler.handle(invocation)).await;
let early = timeout(
Duration::from_millis(50),
MultiAgentHandler.handle(invocation),
)
.await;
assert!(
early.is_err(),
"wait should not return before the minimum timeout clamp"
@@ -1677,7 +1681,7 @@ mod tests {
"timeout_ms": 1000
})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("wait should succeed");
@@ -1717,7 +1721,7 @@ mod tests {
"close_agent",
function_payload(json!({"id": agent_id.to_string()})),
);
let output = CollabHandler
let output = MultiAgentHandler
.handle(invocation)
.await
.expect("close_agent should succeed");

View File

@@ -243,14 +243,6 @@ impl ShellHandler {
freeform,
} = args;
let features = session.features();
let request_rule_enabled = features.enabled(crate::features::Feature::RequestRule);
let prefix_rule = if request_rule_enabled {
prefix_rule
} else {
None
};
let mut exec_params = exec_params;
let dependency_env = session.dependency_env().await;
if !dependency_env.is_empty() {

View File

@@ -142,14 +142,6 @@ impl ToolHandler for UnifiedExecHandler {
..
} = args;
let features = session.features();
let request_rule_enabled = features.enabled(crate::features::Feature::RequestRule);
let prefix_rule = if request_rule_enabled {
prefix_rule
} else {
None
};
if sandbox_permissions.requires_escalated_permissions()
&& !matches!(
context.turn.approval_policy,

View File

@@ -10,9 +10,9 @@ use crate::tools::handlers::SEARCH_TOOL_BM25_DEFAULT_LIMIT;
use crate::tools::handlers::SEARCH_TOOL_BM25_TOOL_NAME;
use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool;
use crate::tools::handlers::apply_patch::create_apply_patch_json_tool;
use crate::tools::handlers::collab::DEFAULT_WAIT_TIMEOUT_MS;
use crate::tools::handlers::collab::MAX_WAIT_TIMEOUT_MS;
use crate::tools::handlers::collab::MIN_WAIT_TIMEOUT_MS;
use crate::tools::handlers::multi_agents::DEFAULT_WAIT_TIMEOUT_MS;
use crate::tools::handlers::multi_agents::MAX_WAIT_TIMEOUT_MS;
use crate::tools::handlers::multi_agents::MIN_WAIT_TIMEOUT_MS;
use crate::tools::handlers::request_user_input_tool_description;
use crate::tools::registry::ToolRegistryBuilder;
use codex_protocol::config_types::WebSearchMode;
@@ -41,7 +41,6 @@ pub(crate) struct ToolsConfig {
pub js_repl_tools_only: bool,
pub collab_tools: bool,
pub collaboration_modes_tools: bool,
pub request_rule_enabled: bool,
pub experimental_supported_tools: Vec<String>,
}
@@ -64,7 +63,6 @@ impl ToolsConfig {
include_js_repl && features.enabled(Feature::JsReplToolsOnly);
let include_collab_tools = features.enabled(Feature::Collab);
let include_collaboration_modes_tools = features.enabled(Feature::CollaborationModes);
let request_rule_enabled = features.enabled(Feature::RequestRule);
let include_search_tool = features.enabled(Feature::Apps);
let shell_type = if !features.enabled(Feature::ShellTool) {
@@ -101,7 +99,6 @@ impl ToolsConfig {
js_repl_tools_only: include_js_repl_tools_only,
collab_tools: include_collab_tools,
collaboration_modes_tools: include_collaboration_modes_tools,
request_rule_enabled,
experimental_supported_tools: model_info.experimental_supported_tools.clone(),
}
}
@@ -174,7 +171,7 @@ impl From<JsonSchema> for AdditionalProperties {
}
}
fn create_approval_parameters(include_prefix_rule: bool) -> BTreeMap<String, JsonSchema> {
fn create_approval_parameters() -> BTreeMap<String, JsonSchema> {
let mut properties = BTreeMap::from([
(
"sandbox_permissions".to_string(),
@@ -200,23 +197,22 @@ fn create_approval_parameters(include_prefix_rule: bool) -> BTreeMap<String, Jso
),
]);
if include_prefix_rule {
properties.insert(
"prefix_rule".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some(
r#"Only specify when sandbox_permissions is `require_escalated`.
properties.insert(
"prefix_rule".to_string(),
JsonSchema::Array {
items: Box::new(JsonSchema::String { description: None }),
description: Some(
r#"Only specify when sandbox_permissions is `require_escalated`.
Suggest a prefix command pattern that will allow you to fulfill similar requests from the user in the future.
Should be a short but reasonable prefix, e.g. [\"git\", \"pull\"] or [\"uv\", \"run\"] or [\"pytest\"]."#.to_string(),
),
});
}
),
},
);
properties
}
fn create_exec_command_tool(include_prefix_rule: bool) -> ToolSpec {
fn create_exec_command_tool() -> ToolSpec {
let mut properties = BTreeMap::from([
(
"cmd".to_string(),
@@ -274,7 +270,7 @@ fn create_exec_command_tool(include_prefix_rule: bool) -> ToolSpec {
},
),
]);
properties.extend(create_approval_parameters(include_prefix_rule));
properties.extend(create_approval_parameters());
ToolSpec::Function(ResponsesApiTool {
name: "exec_command".to_string(),
@@ -337,7 +333,7 @@ fn create_write_stdin_tool() -> ToolSpec {
})
}
fn create_shell_tool(include_prefix_rule: bool) -> ToolSpec {
fn create_shell_tool() -> ToolSpec {
let mut properties = BTreeMap::from([
(
"command".to_string(),
@@ -359,7 +355,7 @@ fn create_shell_tool(include_prefix_rule: bool) -> ToolSpec {
},
),
]);
properties.extend(create_approval_parameters(include_prefix_rule));
properties.extend(create_approval_parameters());
let description = if cfg!(windows) {
r#"Runs a Powershell command (Windows) and returns its output. Arguments to `shell` will be passed to CreateProcessW(). Most commands should be prefixed with ["powershell.exe", "-Command"].
@@ -390,7 +386,7 @@ Examples of valid command strings:
})
}
fn create_shell_command_tool(include_prefix_rule: bool) -> ToolSpec {
fn create_shell_command_tool() -> ToolSpec {
let mut properties = BTreeMap::from([
(
"command".to_string(),
@@ -422,7 +418,7 @@ fn create_shell_command_tool(include_prefix_rule: bool) -> ToolSpec {
},
),
]);
properties.extend(create_approval_parameters(include_prefix_rule));
properties.extend(create_approval_parameters());
let description = if cfg!(windows) {
r#"Runs a Powershell command (Windows) and returns its output.
@@ -1405,7 +1401,6 @@ pub(crate) fn build_specs(
dynamic_tools: &[DynamicToolSpec],
) -> ToolRegistryBuilder {
use crate::tools::handlers::ApplyPatchHandler;
use crate::tools::handlers::CollabHandler;
use crate::tools::handlers::DynamicToolHandler;
use crate::tools::handlers::GrepFilesHandler;
use crate::tools::handlers::JsReplHandler;
@@ -1413,6 +1408,7 @@ pub(crate) fn build_specs(
use crate::tools::handlers::ListDirHandler;
use crate::tools::handlers::McpHandler;
use crate::tools::handlers::McpResourceHandler;
use crate::tools::handlers::MultiAgentHandler;
use crate::tools::handlers::PlanHandler;
use crate::tools::handlers::ReadFileHandler;
use crate::tools::handlers::RequestUserInputHandler;
@@ -1442,19 +1438,13 @@ pub(crate) fn build_specs(
match &config.shell_type {
ConfigShellToolType::Default => {
builder.push_spec_with_parallel_support(
create_shell_tool(config.request_rule_enabled),
true,
);
builder.push_spec_with_parallel_support(create_shell_tool(), true);
}
ConfigShellToolType::Local => {
builder.push_spec_with_parallel_support(ToolSpec::LocalShell {}, true);
}
ConfigShellToolType::UnifiedExec => {
builder.push_spec_with_parallel_support(
create_exec_command_tool(config.request_rule_enabled),
true,
);
builder.push_spec_with_parallel_support(create_exec_command_tool(), true);
builder.push_spec(create_write_stdin_tool());
builder.register_handler("exec_command", unified_exec_handler.clone());
builder.register_handler("write_stdin", unified_exec_handler);
@@ -1463,10 +1453,7 @@ pub(crate) fn build_specs(
// Do nothing.
}
ConfigShellToolType::ShellCommand => {
builder.push_spec_with_parallel_support(
create_shell_command_tool(config.request_rule_enabled),
true,
);
builder.push_spec_with_parallel_support(create_shell_command_tool(), true);
}
}
@@ -1478,12 +1465,14 @@ pub(crate) fn build_specs(
builder.register_handler("shell_command", shell_command_handler);
}
builder.push_spec_with_parallel_support(create_list_mcp_resources_tool(), true);
builder.push_spec_with_parallel_support(create_list_mcp_resource_templates_tool(), true);
builder.push_spec_with_parallel_support(create_read_mcp_resource_tool(), true);
builder.register_handler("list_mcp_resources", mcp_resource_handler.clone());
builder.register_handler("list_mcp_resource_templates", mcp_resource_handler.clone());
builder.register_handler("read_mcp_resource", mcp_resource_handler);
if mcp_tools.is_some() {
builder.push_spec_with_parallel_support(create_list_mcp_resources_tool(), true);
builder.push_spec_with_parallel_support(create_list_mcp_resource_templates_tool(), true);
builder.push_spec_with_parallel_support(create_read_mcp_resource_tool(), true);
builder.register_handler("list_mcp_resources", mcp_resource_handler.clone());
builder.register_handler("list_mcp_resource_templates", mcp_resource_handler.clone());
builder.register_handler("read_mcp_resource", mcp_resource_handler);
}
builder.push_spec(PLAN_TOOL.clone());
builder.register_handler("update_plan", plan_handler);
@@ -1574,17 +1563,17 @@ pub(crate) fn build_specs(
builder.register_handler("view_image", view_image_handler);
if config.collab_tools {
let collab_handler = Arc::new(CollabHandler);
let multi_agent_handler = Arc::new(MultiAgentHandler);
builder.push_spec(create_spawn_agent_tool());
builder.push_spec(create_send_input_tool());
builder.push_spec(create_resume_agent_tool());
builder.push_spec(create_wait_tool());
builder.push_spec(create_close_agent_tool());
builder.register_handler("spawn_agent", collab_handler.clone());
builder.register_handler("send_input", collab_handler.clone());
builder.register_handler("resume_agent", collab_handler.clone());
builder.register_handler("wait", collab_handler.clone());
builder.register_handler("close_agent", collab_handler);
builder.register_handler("spawn_agent", multi_agent_handler.clone());
builder.register_handler("send_input", multi_agent_handler.clone());
builder.register_handler("resume_agent", multi_agent_handler.clone());
builder.register_handler("wait", multi_agent_handler.clone());
builder.register_handler("close_agent", multi_agent_handler);
}
if let Some(mcp_tools) = mcp_tools {
@@ -1830,11 +1819,8 @@ mod tests {
// Build expected from the same helpers used by the builder.
let mut expected: BTreeMap<String, ToolSpec> = BTreeMap::from([]);
for spec in [
create_exec_command_tool(true),
create_exec_command_tool(),
create_write_stdin_tool(),
create_list_mcp_resources_tool(),
create_list_mcp_resource_templates_tool(),
create_read_mcp_resource_tool(),
PLAN_TOOL.clone(),
create_request_user_input_tool(),
create_apply_patch_freeform_tool(),
@@ -2039,7 +2025,7 @@ mod tests {
features,
web_search_mode,
});
let (tools, _) = build_specs(&tools_config, Some(HashMap::new()), None, &[]).build();
let (tools, _) = build_specs(&tools_config, None, None, &[]).build();
let tool_names = tools.iter().map(|t| t.spec.name()).collect::<Vec<_>>();
assert_eq!(&tool_names, &expected_tools,);
}
@@ -2106,6 +2092,53 @@ mod tests {
);
}
#[test]
fn mcp_resource_tools_are_hidden_without_mcp_servers() {
let config = test_config();
let model_info =
ModelsManager::construct_model_info_offline_for_tests("gpt-5-codex", &config);
let mut features = Features::with_defaults();
features.enable(Feature::CollaborationModes);
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_info: &model_info,
features: &features,
web_search_mode: Some(WebSearchMode::Cached),
});
let (tools, _) = build_specs(&tools_config, None, None, &[]).build();
assert!(
!tools.iter().any(|tool| matches!(
tool.spec.name(),
"list_mcp_resources" | "list_mcp_resource_templates" | "read_mcp_resource"
)),
"MCP resource tools should be omitted when no MCP servers are configured"
);
}
#[test]
fn mcp_resource_tools_are_included_when_mcp_servers_are_present() {
let config = test_config();
let model_info =
ModelsManager::construct_model_info_offline_for_tests("gpt-5-codex", &config);
let mut features = Features::with_defaults();
features.enable(Feature::CollaborationModes);
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_info: &model_info,
features: &features,
web_search_mode: Some(WebSearchMode::Cached),
});
let (tools, _) = build_specs(&tools_config, Some(HashMap::new()), None, &[]).build();
assert_contains_tool_names(
&tools,
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
],
);
}
#[test]
fn test_build_specs_gpt5_codex_default() {
let mut features = Features::with_defaults();
@@ -2116,9 +2149,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2138,9 +2168,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2162,9 +2189,6 @@ mod tests {
&[
"exec_command",
"write_stdin",
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2186,9 +2210,6 @@ mod tests {
&[
"exec_command",
"write_stdin",
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2208,9 +2229,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2230,9 +2248,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2252,9 +2267,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"web_search",
@@ -2273,9 +2285,6 @@ mod tests {
Some(WebSearchMode::Cached),
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2297,9 +2306,6 @@ mod tests {
&[
"exec_command",
"write_stdin",
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -2796,7 +2802,7 @@ mod tests {
#[test]
fn test_shell_tool() {
let tool = super::create_shell_tool(true);
let tool = super::create_shell_tool();
let ToolSpec::Function(ResponsesApiTool {
description, name, ..
}) = &tool
@@ -2826,7 +2832,7 @@ Examples of valid command strings:
#[test]
fn test_shell_command_tool() {
let tool = super::create_shell_command_tool(true);
let tool = super::create_shell_command_tool();
let ToolSpec::Function(ResponsesApiTool {
description, name, ..
}) = &tool

View File

@@ -24,5 +24,5 @@ Notes:
- `description`
- `connector_name`
- input schema property keys (`input_keys`)
- If the needed app is already explicit in the prompt (for example an `apps://...` mention) or already present in the current `tools` list, you can call that tool directly.
- If the needed app is already explicit in the prompt (for example `[$app-name](app://{connector_id})`) or already present in the current `tools` list, you can call that tool directly.
- Do not use `search_tool_bm25` for non-apps/local tasks (filesystem, repo search, or shell-only workflows) or anything not related to {{app_names}}.

View File

@@ -75,9 +75,6 @@ async fn model_selects_expected_tools() {
expected_default_tools(
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -94,9 +91,6 @@ async fn model_selects_expected_tools() {
expected_default_tools(
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -113,9 +107,6 @@ async fn model_selects_expected_tools() {
expected_default_tools(
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",
@@ -132,9 +123,6 @@ async fn model_selects_expected_tools() {
expected_default_tools(
"shell",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"web_search",
@@ -150,9 +138,6 @@ async fn model_selects_expected_tools() {
expected_default_tools(
"shell_command",
&[
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",

View File

@@ -488,7 +488,6 @@ async fn permissions_message_includes_writable_roots() -> Result<()> {
&sandbox_policy,
AskForApproval::OnRequest,
&Policy::empty(),
true,
test.config.cwd.as_path(),
)
.into_text();

View File

@@ -144,9 +144,6 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> {
vec!["exec_command", "write_stdin"]
};
expected_tools_names.extend([
"list_mcp_resources",
"list_mcp_resource_templates",
"read_mcp_resource",
"update_plan",
"request_user_input",
"apply_patch",

View File

@@ -279,7 +279,7 @@ impl CodexLogSnapshot {
}
let level = match classification {
"bug" | "bad_result" => Level::Error,
"bug" | "bad_result" | "safety_check" => Level::Error,
_ => Level::Info,
};
@@ -342,6 +342,7 @@ fn display_classification(classification: &str) -> String {
"bug" => "Bug".to_string(),
"bad_result" => "Bad result".to_string(),
"good_result" => "Good result".to_string(),
"safety_check" => "Safety check".to_string(),
_ => "Other".to_string(),
}
}

View File

@@ -225,8 +225,6 @@ const APPROVAL_POLICY_UNLESS_TRUSTED: &str =
include_str!("prompts/permissions/approval_policy/unless_trusted.md");
const APPROVAL_POLICY_ON_FAILURE: &str =
include_str!("prompts/permissions/approval_policy/on_failure.md");
const APPROVAL_POLICY_ON_REQUEST: &str =
include_str!("prompts/permissions/approval_policy/on_request.md");
const APPROVAL_POLICY_ON_REQUEST_RULE: &str =
include_str!("prompts/permissions/approval_policy/on_request_rule.md");
@@ -241,29 +239,20 @@ impl DeveloperInstructions {
Self { text: text.into() }
}
pub fn from(
approval_policy: AskForApproval,
exec_policy: &Policy,
request_rule_enabled: bool,
) -> DeveloperInstructions {
pub fn from(approval_policy: AskForApproval, exec_policy: &Policy) -> DeveloperInstructions {
let text = match approval_policy {
AskForApproval::Never => APPROVAL_POLICY_NEVER.to_string(),
AskForApproval::UnlessTrusted => APPROVAL_POLICY_UNLESS_TRUSTED.to_string(),
AskForApproval::OnFailure => APPROVAL_POLICY_ON_FAILURE.to_string(),
AskForApproval::OnRequest => {
if !request_rule_enabled {
APPROVAL_POLICY_ON_REQUEST.to_string()
} else {
let command_prefixes =
format_allow_prefixes(exec_policy.get_allowed_prefixes());
match command_prefixes {
Some(prefixes) => {
format!(
"{APPROVAL_POLICY_ON_REQUEST_RULE}\n## Approved command prefixes\nThe following prefix rules have already been approved: {prefixes}"
)
}
None => APPROVAL_POLICY_ON_REQUEST_RULE.to_string(),
let command_prefixes = format_allow_prefixes(exec_policy.get_allowed_prefixes());
match command_prefixes {
Some(prefixes) => {
format!(
"{APPROVAL_POLICY_ON_REQUEST_RULE}\n## Approved command prefixes\nThe following prefix rules have already been approved: {prefixes}"
)
}
None => APPROVAL_POLICY_ON_REQUEST_RULE.to_string(),
}
}
};
@@ -301,7 +290,6 @@ impl DeveloperInstructions {
sandbox_policy: &SandboxPolicy,
approval_policy: AskForApproval,
exec_policy: &Policy,
request_rule_enabled: bool,
cwd: &Path,
) -> Self {
let network_access = if sandbox_policy.has_full_network_access() {
@@ -325,7 +313,6 @@ impl DeveloperInstructions {
network_access,
approval_policy,
exec_policy,
request_rule_enabled,
writable_roots,
)
}
@@ -349,7 +336,6 @@ impl DeveloperInstructions {
network_access: NetworkAccess,
approval_policy: AskForApproval,
exec_policy: &Policy,
request_rule_enabled: bool,
writable_roots: Option<Vec<WritableRoot>>,
) -> Self {
let start_tag = DeveloperInstructions::new("<permissions instructions>");
@@ -359,11 +345,7 @@ impl DeveloperInstructions {
sandbox_mode,
network_access,
))
.concat(DeveloperInstructions::from(
approval_policy,
exec_policy,
request_rule_enabled,
))
.concat(DeveloperInstructions::from(approval_policy, exec_policy))
.concat(DeveloperInstructions::from_writable_roots(writable_roots))
.concat(end_tag)
}
@@ -1207,7 +1189,6 @@ mod tests {
NetworkAccess::Enabled,
AskForApproval::OnRequest,
&Policy::empty(),
false,
None,
);
@@ -1217,7 +1198,7 @@ mod tests {
"expected network access to be enabled in message"
);
assert!(
text.contains("`approval_policy` is `on-request`"),
text.contains("How to request escalation"),
"expected approval guidance to be included"
);
}
@@ -1236,7 +1217,6 @@ mod tests {
&policy,
AskForApproval::UnlessTrusted,
&Policy::empty(),
false,
&PathBuf::from("/tmp"),
);
let text = instructions.into_text();
@@ -1245,7 +1225,7 @@ mod tests {
}
#[test]
fn includes_request_rule_instructions_when_enabled() {
fn includes_request_rule_instructions_for_on_request() {
let mut exec_policy = Policy::empty();
exec_policy
.add_prefix_rule(
@@ -1258,7 +1238,6 @@ mod tests {
NetworkAccess::Enabled,
AskForApproval::OnRequest,
&exec_policy,
true,
None,
);

View File

@@ -1,12 +0,0 @@
Approvals are your mechanism to get user consent to run shell commands without the sandbox. `approval_policy` is `on-request`: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. If the completing the task requires escalated permissions, Do not let these settings or the sandbox deter you from attempting to accomplish the user's task.
Here are scenarios where you'll need to request approval:
- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for.
When requesting approval to execute a command that will require escalated privileges:
- Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
- Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter

View File

@@ -371,5 +371,6 @@ pub(crate) enum FeedbackCategory {
BadResult,
GoodResult,
Bug,
SafetyCheck,
Other,
}

View File

@@ -353,6 +353,10 @@ fn feedback_title_and_placeholder(category: FeedbackCategory) -> (String, String
"Tell us more (bug)".to_string(),
"(optional) Write a short description to help us further".to_string(),
),
FeedbackCategory::SafetyCheck => (
"Tell us more (safety check)".to_string(),
"(optional) Share what was refused and why it should have been allowed".to_string(),
),
FeedbackCategory::Other => (
"Tell us more (other)".to_string(),
"(optional) Write a short description to help us further".to_string(),
@@ -365,6 +369,7 @@ fn feedback_classification(category: FeedbackCategory) -> &'static str {
FeedbackCategory::BadResult => "bad_result",
FeedbackCategory::GoodResult => "good_result",
FeedbackCategory::Bug => "bug",
FeedbackCategory::SafetyCheck => "safety_check",
FeedbackCategory::Other => "other",
}
}
@@ -378,14 +383,15 @@ fn issue_url_for_category(
// the external GitHub behavior identical while routing internal users to
// the internal go link.
match category {
FeedbackCategory::Bug | FeedbackCategory::BadResult | FeedbackCategory::Other => {
Some(match feedback_audience {
FeedbackAudience::OpenAiEmployee => slack_feedback_url(thread_id),
FeedbackAudience::External => {
format!("{BASE_BUG_ISSUE_URL}&steps=Uploaded%20thread:%20{thread_id}")
}
})
}
FeedbackCategory::Bug
| FeedbackCategory::BadResult
| FeedbackCategory::SafetyCheck
| FeedbackCategory::Other => Some(match feedback_audience {
FeedbackAudience::OpenAiEmployee => slack_feedback_url(thread_id),
FeedbackAudience::External => {
format!("{BASE_BUG_ISSUE_URL}&steps=Uploaded%20thread:%20{thread_id}")
}
}),
FeedbackCategory::GoodResult => None,
}
}
@@ -423,6 +429,12 @@ pub(crate) fn feedback_selection_params(
"Helpful, correct, highquality, or delightful result worth celebrating.",
FeedbackCategory::GoodResult,
),
make_feedback_item(
app_event_tx.clone(),
"safety check",
"Benign usage blocked due to safety checks or refusals.",
FeedbackCategory::SafetyCheck,
),
make_feedback_item(
app_event_tx,
"other",
@@ -616,7 +628,14 @@ mod tests {
}
#[test]
fn issue_url_available_for_bug_bad_result_and_other() {
fn feedback_view_safety_check() {
let view = make_view(FeedbackCategory::SafetyCheck);
let rendered = render(&view, 60);
insta::assert_snapshot!("feedback_view_safety_check", rendered);
}
#[test]
fn issue_url_available_for_bug_bad_result_safety_check_and_other() {
let bug_url = issue_url_for_category(
FeedbackCategory::Bug,
"thread-1",
@@ -639,6 +658,13 @@ mod tests {
);
assert!(other_url.is_some());
let safety_check_url = issue_url_for_category(
FeedbackCategory::SafetyCheck,
"thread-4",
FeedbackAudience::OpenAiEmployee,
);
assert!(safety_check_url.is_some());
assert!(
issue_url_for_category(
FeedbackCategory::GoodResult,

View File

@@ -0,0 +1,9 @@
---
source: tui/src/bottom_pane/feedback_view.rs
expression: rendered
---
▌ Tell us more (safety check)
▌ (optional) Share what was refused and why it should have b
Press enter to confirm or esc to go back

View File

@@ -184,7 +184,6 @@ use crate::bottom_pane::SelectionViewParams;
use crate::bottom_pane::custom_prompt_view::CustomPromptView;
use crate::bottom_pane::popup_consts::standard_popup_hint_line;
use crate::clipboard_paste::paste_image_to_temp_png;
use crate::collab;
use crate::collaboration_modes;
use crate::diff_render::display_path_for;
use crate::exec_cell::CommandOutput;
@@ -201,6 +200,7 @@ use crate::history_cell::WebSearchCell;
use crate::key_hint;
use crate::key_hint::KeyBinding;
use crate::markdown::append_markdown;
use crate::multi_agents;
use crate::render::Insets;
use crate::render::renderable::ColumnRenderable;
use crate::render::renderable::FlexRenderable;
@@ -4113,17 +4113,19 @@ impl ChatWidget {
EventMsg::ExitedReviewMode(review) => self.on_exited_review_mode(review),
EventMsg::ContextCompacted(_) => self.on_agent_message("Context compacted".to_owned()),
EventMsg::CollabAgentSpawnBegin(_) => {}
EventMsg::CollabAgentSpawnEnd(ev) => self.on_collab_event(collab::spawn_end(ev)),
EventMsg::CollabAgentSpawnEnd(ev) => self.on_collab_event(multi_agents::spawn_end(ev)),
EventMsg::CollabAgentInteractionBegin(_) => {}
EventMsg::CollabAgentInteractionEnd(ev) => {
self.on_collab_event(collab::interaction_end(ev))
self.on_collab_event(multi_agents::interaction_end(ev))
}
EventMsg::CollabWaitingBegin(ev) => self.on_collab_event(collab::waiting_begin(ev)),
EventMsg::CollabWaitingEnd(ev) => self.on_collab_event(collab::waiting_end(ev)),
EventMsg::CollabWaitingBegin(ev) => {
self.on_collab_event(multi_agents::waiting_begin(ev))
}
EventMsg::CollabWaitingEnd(ev) => self.on_collab_event(multi_agents::waiting_end(ev)),
EventMsg::CollabCloseBegin(_) => {}
EventMsg::CollabCloseEnd(ev) => self.on_collab_event(collab::close_end(ev)),
EventMsg::CollabResumeBegin(ev) => self.on_collab_event(collab::resume_begin(ev)),
EventMsg::CollabResumeEnd(ev) => self.on_collab_event(collab::resume_end(ev)),
EventMsg::CollabCloseEnd(ev) => self.on_collab_event(multi_agents::close_end(ev)),
EventMsg::CollabResumeBegin(ev) => self.on_collab_event(multi_agents::resume_begin(ev)),
EventMsg::CollabResumeEnd(ev) => self.on_collab_event(multi_agents::resume_end(ev)),
EventMsg::ThreadRolledBack(rollback) => {
if from_replay {
self.app_event_tx.send(AppEvent::ApplyThreadRollback {

View File

@@ -4,8 +4,10 @@ expression: popup
---
How was this?
1. bug Crash, error message, hang, or broken UI/behavior.
2. bad result Output was off-target, incorrect, incomplete, or unhelpful.
3. good result Helpful, correct, highquality, or delightful result worth
celebrating.
4. other Slowness, feature suggestion, UX feedback, or anything else.
1. bug Crash, error message, hang, or broken UI/behavior.
2. bad result Output was off-target, incorrect, incomplete, or unhelpful.
3. good result Helpful, correct, highquality, or delightful result worth
celebrating.
4. safety check Benign usage blocked due to safety checks or refusals.
5. other Slowness, feature suggestion, UX feedback, or anything
else.

View File

@@ -64,7 +64,6 @@ mod bottom_pane;
mod chatwidget;
mod cli;
mod clipboard_paste;
mod collab;
mod collaboration_modes;
mod color;
pub mod custom_terminal;
@@ -86,6 +85,7 @@ mod markdown_render;
mod markdown_stream;
mod mention_codec;
mod model_migration;
mod multi_agents;
mod notifications;
pub mod onboarding;
mod oss_selection;

View File

@@ -218,9 +218,11 @@ impl Renderable for StatusIndicatorWidget {
return;
}
// Schedule next animation frame.
self.frame_requester
.schedule_frame_in(Duration::from_millis(32));
if self.animations_enabled {
// Schedule next animation frame.
self.frame_requester
.schedule_frame_in(Duration::from_millis(32));
}
let now = Instant::now();
let elapsed_duration = self.elapsed_duration_at(now);
let pretty_elapsed = fmt_elapsed_compact(elapsed_duration.as_secs());