Mirror of https://github.com/openai/codex.git, synced 2026-04-24 14:45:27 +00:00
[codex] move user_explicitly_approved flag into SafetyCheck::AutoApprove
@@ -52,16 +52,10 @@ pub(crate) async fn apply_patch(
         &turn_context.sandbox_policy,
         &turn_context.cwd,
     ) {
-        SafetyCheck::UserAutoApprove { .. } => {
-            InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
-                action,
-                user_explicitly_approved_this_action: true,
-            })
-        }
-        SafetyCheck::AutoApprove { .. } => {
+        SafetyCheck::AutoApprove { user_explicitly_approved, .. } => {
             InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec {
                 action,
-                user_explicitly_approved_this_action: false,
+                user_explicitly_approved_this_action: user_explicitly_approved,
             })
         }
         SafetyCheck::AskUser => {
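
After this change, the two auto-approve arms in `apply_patch` collapse into one, and the explicit-approval bit travels on the variant instead of being hard-coded per arm. A minimal, self-contained sketch of that shape (the enum and struct below are simplified stand-ins for the real codex types, not the actual definitions):

```rust
// Simplified stand-ins for illustration; the real types live in the codex crates.
enum SafetyCheck {
    AutoApprove { user_explicitly_approved: bool },
    AskUser,
}

struct ApplyPatchExec {
    user_explicitly_approved_this_action: bool,
}

fn to_exec(check: SafetyCheck) -> Option<ApplyPatchExec> {
    match check {
        // One arm now covers both "user said yes" and "policy said yes";
        // the distinction is carried by the bound flag.
        SafetyCheck::AutoApprove { user_explicitly_approved } => Some(ApplyPatchExec {
            user_explicitly_approved_this_action: user_explicitly_approved,
        }),
        // Elided: in the real code this arm asks the user before proceeding.
        SafetyCheck::AskUser => None,
    }
}

fn main() {
    let exec = to_exec(SafetyCheck::AutoApprove { user_explicitly_approved: true });
    assert!(exec.is_some_and(|e| e.user_explicitly_approved_this_action));
}
```
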
@@ -2898,8 +2898,9 @@ async fn handle_container_exec_with_params(
         justification: params.justification.clone(),
     };
     let safety = if *user_explicitly_approved_this_action {
-        SafetyCheck::UserAutoApprove {
+        SafetyCheck::AutoApprove {
             sandbox_type: SandboxType::None,
+            user_explicitly_approved: true,
         }
     } else {
         assess_safety_for_untrusted_command(
@@ -2931,22 +2932,19 @@ async fn handle_container_exec_with_params(
     };

     let sandbox_type = match safety {
-        SafetyCheck::UserAutoApprove { sandbox_type } => {
+        SafetyCheck::AutoApprove {
+            sandbox_type,
+            user_explicitly_approved,
+        } => {
             otel_event_manager.tool_decision(
                 tool_name,
                 call_id.as_str(),
                 ReviewDecision::Approved,
-                ToolDecisionSource::User,
-            );
-
-            sandbox_type
-        }
-        SafetyCheck::AutoApprove { sandbox_type } => {
-            otel_event_manager.tool_decision(
-                tool_name,
-                call_id.as_str(),
-                ReviewDecision::Approved,
-                ToolDecisionSource::Config,
+                if user_explicitly_approved {
+                    ToolDecisionSource::User
+                } else {
+                    ToolDecisionSource::Config
+                },
             );

             sandbox_type
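
With a single `AutoApprove` arm, the OTEL tool-decision source is now derived from the flag rather than from which variant matched. A minimal sketch of that selection, with a stand-in `ToolDecisionSource` (the real enum is defined elsewhere in codex):

```rust
// Stand-in for the real ToolDecisionSource; variants mirror the ones used above.
#[derive(Debug, PartialEq)]
enum ToolDecisionSource {
    User,   // the user explicitly approved this action
    Config, // approval came from policy/config, not an explicit user decision
}

// Mirrors the inline `if` passed to otel_event_manager.tool_decision(..) above.
fn decision_source(user_explicitly_approved: bool) -> ToolDecisionSource {
    if user_explicitly_approved {
        ToolDecisionSource::User
    } else {
        ToolDecisionSource::Config
    }
}

fn main() {
    assert_eq!(decision_source(true), ToolDecisionSource::User);
    assert_eq!(decision_source(false), ToolDecisionSource::Config);
}
```
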
@@ -13,10 +13,14 @@ use crate::protocol::SandboxPolicy;

 #[derive(Debug, PartialEq)]
 pub enum SafetyCheck {
-    UserAutoApprove { sandbox_type: SandboxType },
-    AutoApprove { sandbox_type: SandboxType },
+    AutoApprove {
+        sandbox_type: SandboxType,
+        user_explicitly_approved: bool,
+    },
     AskUser,
-    Reject { reason: String },
+    Reject {
+        reason: String,
+    },
 }

 pub fn assess_patch_safety(
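
For reference, this is the shape of `SafetyCheck` after the change, assembled from the hunk above into a self-contained sketch (`SandboxType` is a simplified stand-in here; the real one has platform-specific variants):

```rust
// Simplified stand-in; the real SandboxType is defined in the sandboxing code.
#[derive(Debug, PartialEq)]
pub enum SandboxType {
    None,
}

#[derive(Debug, PartialEq)]
pub enum SafetyCheck {
    AutoApprove {
        sandbox_type: SandboxType,
        user_explicitly_approved: bool,
    },
    AskUser,
    Reject {
        reason: String,
    },
}

fn main() {
    // Explicit user approval now rides on the AutoApprove variant itself
    // instead of a separate UserAutoApprove variant.
    let check = SafetyCheck::AutoApprove {
        sandbox_type: SandboxType::None,
        user_explicitly_approved: true,
    };

    match check {
        SafetyCheck::AutoApprove { user_explicitly_approved, .. } => {
            println!("auto-approved (explicit user approval: {user_explicitly_approved})");
        }
        SafetyCheck::AskUser => println!("ask the user"),
        SafetyCheck::Reject { reason } => println!("rejected: {reason}"),
    }
}
```
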
@@ -53,12 +57,16 @@ pub fn assess_patch_safety(
         // fall back to asking the user because the patch may touch arbitrary
         // paths outside the project.
         match get_platform_sandbox() {
-            Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
+            Some(sandbox_type) => SafetyCheck::AutoApprove {
+                sandbox_type,
+                user_explicitly_approved: false,
+            },
             None if sandbox_policy == &SandboxPolicy::DangerFullAccess => {
                 // If the user has explicitly requested DangerFullAccess, then
                 // we can auto-approve even without a sandbox.
                 SafetyCheck::AutoApprove {
                     sandbox_type: SandboxType::None,
+                    user_explicitly_approved: false,
                 }
             }
             None => SafetyCheck::AskUser,
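
The same default runs through this hunk and the remaining ones: every construction driven by policy or by an available sandbox now states `user_explicitly_approved: false`, and only the explicit-approval branch in `handle_container_exec_with_params` sets it to `true`. A self-contained sketch of the fallback logic in this hunk, with simplified stand-ins for `get_platform_sandbox` and `SandboxPolicy` (signatures differ from the real codex code):

```rust
// Simplified stand-ins for illustration only.
#[derive(Debug, PartialEq)]
enum SandboxType {
    None,
    Platform, // whatever the platform sandbox would be on this OS
}

#[derive(PartialEq)]
enum SandboxPolicy {
    DangerFullAccess,
    ReadOnly,
}

#[derive(Debug, PartialEq)]
enum SafetyCheck {
    AutoApprove {
        sandbox_type: SandboxType,
        user_explicitly_approved: bool,
    },
    AskUser,
}

fn patch_fallback(platform_sandbox: Option<SandboxType>, policy: &SandboxPolicy) -> SafetyCheck {
    match platform_sandbox {
        // A platform sandbox exists: auto-approve into it, but it is not a user decision.
        Some(sandbox_type) => SafetyCheck::AutoApprove {
            sandbox_type,
            user_explicitly_approved: false,
        },
        // No sandbox, but the user opted into DangerFullAccess via policy.
        None if policy == &SandboxPolicy::DangerFullAccess => SafetyCheck::AutoApprove {
            sandbox_type: SandboxType::None,
            user_explicitly_approved: false,
        },
        // No sandbox and no blanket permission: ask the user.
        None => SafetyCheck::AskUser,
    }
}

fn main() {
    assert_eq!(
        patch_fallback(None, &SandboxPolicy::ReadOnly),
        SafetyCheck::AskUser
    );
    assert_eq!(
        patch_fallback(Some(SandboxType::Platform), &SandboxPolicy::ReadOnly),
        SafetyCheck::AutoApprove {
            sandbox_type: SandboxType::Platform,
            user_explicitly_approved: false,
        }
    );
}
```
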
@@ -102,6 +110,7 @@ pub fn assess_command_safety(
     if is_known_safe_command(command) || approved.contains(command) {
         return SafetyCheck::AutoApprove {
             sandbox_type: SandboxType::None,
+            user_explicitly_approved: false,
         };
     }

@@ -127,13 +136,17 @@ pub(crate) fn assess_safety_for_untrusted_command(
         | (Never, DangerFullAccess)
         | (OnRequest, DangerFullAccess) => SafetyCheck::AutoApprove {
             sandbox_type: SandboxType::None,
+            user_explicitly_approved: false,
         },
         (OnRequest, ReadOnly) | (OnRequest, WorkspaceWrite { .. }) => {
             if with_escalated_permissions {
                 SafetyCheck::AskUser
             } else {
                 match get_platform_sandbox() {
-                    Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
+                    Some(sandbox_type) => SafetyCheck::AutoApprove {
+                        sandbox_type,
+                        user_explicitly_approved: false,
+                    },
                     // Fall back to asking since the command is untrusted and
                     // we do not have a sandbox available
                     None => SafetyCheck::AskUser,
@@ -145,7 +158,10 @@ pub(crate) fn assess_safety_for_untrusted_command(
         | (OnFailure, ReadOnly)
         | (OnFailure, WorkspaceWrite { .. }) => {
             match get_platform_sandbox() {
-                Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
+                Some(sandbox_type) => SafetyCheck::AutoApprove {
+                    sandbox_type,
+                    user_explicitly_approved: false,
+                },
                 None => {
                     if matches!(approval_policy, OnFailure) {
                         // Since the command is not trusted, even though the
@@ -343,7 +359,10 @@ mod tests {
         );

         let expected = match get_platform_sandbox() {
-            Some(sandbox_type) => SafetyCheck::AutoApprove { sandbox_type },
+            Some(sandbox_type) => SafetyCheck::AutoApprove {
+                sandbox_type,
+                user_explicitly_approved: false,
+            },
             None => SafetyCheck::AskUser,
         };
         assert_eq!(safety_check, expected);