This commit is contained in:
jimmyfraiture
2025-09-29 13:56:57 +01:00
parent 8a7f75eeef
commit 4562946b3b
22 changed files with 3810 additions and 717 deletions

1
codex-rs/Cargo.lock generated
View File

@@ -584,6 +584,7 @@ dependencies = [
"codex-file-search",
"codex-protocol",
"core_test_support",
"futures",
"libc",
"mcp-types",
"portable-pty",

View File

@@ -27,6 +27,7 @@ time = { workspace = true, features = ["formatting", "parsing", "local-offset",
tracing = { workspace = true }
tree-sitter = { workspace = true }
tree-sitter-bash = { workspace = true }
futures = { workspace = true }
[dev-dependencies]
core_test_support = { workspace = true }

View File

@@ -0,0 +1,363 @@
use crate::model_family::ModelFamily;
use crate::tool_schema::OpenAiTool;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use codex_protocol::protocol::RateLimitSnapshot;
use codex_protocol::protocol::TokenUsage;
use futures::Stream;
use serde::Serialize;
use serde_json::Value;
use std::borrow::Cow;
use std::ops::Deref;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;
/// Review thread system prompt. Edit `agent/review_prompt.md` to customize.
pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md");
/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
pub struct Prompt {
    /// Conversation context input items.
    pub input: Vec<ResponseItem>,
    /// Tools available to the model, including additional tools sourced from
    /// external MCP servers.
    pub tools: Vec<OpenAiTool>,
    /// Optional override for the built-in BASE_INSTRUCTIONS.
    pub base_instructions_override: Option<String>,
    /// Optional output schema for the model's response.
    pub output_schema: Option<Value>,
}
impl Prompt {
    /// Resolve the effective system instructions for `model`.
    ///
    /// Uses `base_instructions_override` when present, otherwise the model
    /// family's built-in instructions. When no override is set, the family
    /// requires special apply_patch guidance, and no `apply_patch` tool is
    /// already registered, the apply_patch tool instructions are appended.
    pub fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
        let base: &str = match self.base_instructions_override.as_deref() {
            Some(override_text) => override_text,
            None => model.base_instructions.as_str(),
        };
        // Detect a pre-registered apply_patch tool under either tool shape.
        let has_apply_patch_tool = self.tools.iter().any(|tool| {
            let name = match tool {
                OpenAiTool::Function(f) => Some(f.name.as_str()),
                OpenAiTool::Freeform(f) => Some(f.name.as_str()),
                _ => None,
            };
            name == Some("apply_patch")
        });
        let should_append = self.base_instructions_override.is_none()
            && model.needs_special_apply_patch_instructions
            && !has_apply_patch_tool;
        if should_append {
            Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}"))
        } else {
            Cow::Borrowed(base)
        }
    }

    /// Clone the conversation input items in the order they will be sent.
    pub fn get_formatted_input(&self) -> Vec<ResponseItem> {
        self.input.to_vec()
    }
}
/// Events emitted while a model response is streamed back to the caller.
#[derive(Debug)]
pub enum ResponseEvent {
    /// The response stream has been opened.
    Created,
    /// A complete output item finished streaming.
    OutputItemDone(ResponseItem),
    /// The turn finished; carries the response id and, when reported by the
    /// server, the token usage for the turn.
    Completed {
        response_id: String,
        token_usage: Option<TokenUsage>,
    },
    /// Incremental chunk of assistant output text.
    OutputTextDelta(String),
    /// Incremental chunk of the reasoning summary.
    ReasoningSummaryDelta(String),
    /// Incremental chunk of raw reasoning content.
    ReasoningContentDelta(String),
    /// A new part of the reasoning summary started.
    ReasoningSummaryPartAdded,
    /// A web-search tool call began; `call_id` identifies the call.
    WebSearchCallBegin {
        call_id: String,
    },
    /// Updated rate-limit information reported by the server.
    RateLimits(RateLimitSnapshot),
}
/// Reasoning settings serialized into the Responses API request; each field
/// is omitted from the JSON when unset.
#[derive(Debug, Serialize)]
pub struct Reasoning {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffortConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub summary: Option<ReasoningSummaryConfig>,
}

/// Discriminator for `TextFormat`; serializes as `"json_schema"`.
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "snake_case")]
pub enum TextFormatType {
    #[default]
    JsonSchema,
}

/// Output-format constraint attached to the `text` controls, carrying the
/// JSON schema the response must conform to.
#[derive(Debug, Serialize, Default, Clone)]
pub struct TextFormat {
    pub r#type: TextFormatType,
    pub strict: bool,
    pub schema: Value,
    pub name: String,
}

/// Controls under the `text` field in the Responses API for GPT-5.
#[derive(Debug, Serialize, Default, Clone)]
pub struct TextControls {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<OpenAiVerbosity>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<TextFormat>,
}

/// Verbosity value sent to the OpenAI API; serializes lowercase
/// (e.g. `"low"`).
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "lowercase")]
pub enum OpenAiVerbosity {
    Low,
    #[default]
    Medium,
    High,
}
impl From<VerbosityConfig> for OpenAiVerbosity {
    /// Map the user-facing verbosity setting onto its wire representation.
    fn from(v: VerbosityConfig) -> Self {
        match v {
            VerbosityConfig::Low => Self::Low,
            VerbosityConfig::Medium => Self::Medium,
            VerbosityConfig::High => Self::High,
        }
    }
}
/// Request object that is serialized as JSON and POST'ed when using the
/// Responses API.
#[derive(Debug, Serialize)]
pub struct ResponsesApiRequest<'a> {
    pub model: &'a str,
    /// Effective system instructions for the turn.
    pub instructions: &'a str,
    // TODO(mbolin): ResponseItem::Other should not be serialized. Currently,
    // we code defensively to avoid this case, but perhaps we should use a
    // separate enum for serialization.
    pub input: &'a Vec<ResponseItem>,
    /// Pre-serialized tool definitions.
    pub tools: &'a [serde_json::Value],
    pub tool_choice: &'static str,
    pub parallel_tool_calls: bool,
    pub reasoning: Option<Reasoning>,
    pub store: bool,
    pub stream: bool,
    pub include: Vec<String>,
    // Omitted from the payload when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,
    // Omitted from the payload when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub text: Option<TextControls>,
}
/// Build the `reasoning` request parameter, or `None` when the model family
/// does not support reasoning summaries.
pub fn create_reasoning_param_for_request(
    model_family: &ModelFamily,
    effort: Option<ReasoningEffortConfig>,
    summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
    model_family.supports_reasoning_summaries.then(|| Reasoning {
        effort,
        summary: Some(summary),
    })
}
pub fn create_text_param_for_request(
verbosity: Option<VerbosityConfig>,
output_schema: &Option<Value>,
) -> Option<TextControls> {
if verbosity.is_none() && output_schema.is_none() {
return None;
}
Some(TextControls {
verbosity: verbosity.map(std::convert::Into::into),
format: output_schema.as_ref().map(|schema| TextFormat {
r#type: TextFormatType::JsonSchema,
strict: true,
schema: schema.clone(),
name: "codex_output_schema".to_string(),
}),
})
}
/// Stream of `ResponseEvent`s backed by a tokio mpsc channel; the producer
/// side sends events (or an error of type `E`) as they arrive.
pub struct ResponseStream<E> {
    pub rx_event: mpsc::Receiver<std::result::Result<ResponseEvent, E>>,
}

impl<E> Stream for ResponseStream<E> {
    type Item = std::result::Result<ResponseEvent, E>;

    // Delegates directly to the channel; per tokio semantics, yields
    // `Poll::Ready(None)` once the sender is dropped and the buffer drains.
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<std::result::Result<ResponseEvent, E>>> {
        self.rx_event.poll_recv(cx)
    }
}
#[cfg(test)]
mod tests {
    use crate::config_types::ReasoningSummaryFormat;
    use crate::tooling::ApplyPatchToolType;
    use pretty_assertions::assert_eq;

    use super::*;

    /// One scenario for the instruction-assembly test below.
    struct InstructionsTestCase {
        pub slug: &'static str,
        pub expects_apply_patch_instructions: bool,
    }

    // apply_patch instructions must be appended exactly when the model family
    // asks for them and no base-instructions override is set.
    #[test]
    fn get_full_instructions_no_user_content() {
        let prompt = Prompt::default();
        let base_instructions = "Base instructions".to_string();
        let test_cases = vec![
            InstructionsTestCase {
                slug: "needs-apply-patch",
                expects_apply_patch_instructions: true,
            },
            InstructionsTestCase {
                slug: "no-apply-patch",
                expects_apply_patch_instructions: false,
            },
        ];
        for test_case in test_cases {
            let model_family = ModelFamily {
                slug: test_case.slug.to_string(),
                family: "test".to_string(),
                needs_special_apply_patch_instructions: test_case.expects_apply_patch_instructions,
                supports_reasoning_summaries: false,
                reasoning_summary_format: ReasoningSummaryFormat::None,
                uses_local_shell_tool: false,
                apply_patch_tool_type: Some(ApplyPatchToolType::Function),
                base_instructions: base_instructions.clone(),
            };
            let expected = if test_case.expects_apply_patch_instructions {
                format!(
                    "{}\n{}",
                    model_family.base_instructions, APPLY_PATCH_TOOL_INSTRUCTIONS
                )
            } else {
                model_family.base_instructions.clone()
            };
            let full = prompt.get_full_instructions(&model_family);
            assert_eq!(full, expected);
        }
    }

    // `text.verbosity` must serialize to its lowercase wire form.
    #[test]
    fn serializes_text_verbosity_when_set() {
        let input: Vec<ResponseItem> = vec![];
        let tools: Vec<serde_json::Value> = vec![];
        let req = ResponsesApiRequest {
            model: "gpt-5",
            instructions: "i",
            input: &input,
            tools: &tools,
            tool_choice: "auto",
            parallel_tool_calls: false,
            reasoning: None,
            store: false,
            stream: true,
            include: vec![],
            prompt_cache_key: None,
            text: Some(TextControls {
                verbosity: Some(OpenAiVerbosity::Low),
                format: None,
            }),
        };
        let v = serde_json::to_value(&req).expect("json");
        assert_eq!(
            v.get("text")
                .and_then(|t| t.get("verbosity"))
                .and_then(|s| s.as_str()),
            Some("low")
        );
    }

    // A schema built via `create_text_param_for_request` must serialize as a
    // strict `json_schema` format carrying the original schema verbatim.
    #[test]
    fn serializes_text_schema_with_strict_format() {
        let input: Vec<ResponseItem> = vec![];
        let tools: Vec<serde_json::Value> = vec![];
        let schema = serde_json::json!({
            "type": "object",
            "properties": {
                "answer": {"type": "string"}
            },
            "required": ["answer"],
        });
        let text_controls =
            create_text_param_for_request(None, &Some(schema.clone())).expect("text controls");
        let req = ResponsesApiRequest {
            model: "gpt-5",
            instructions: "i",
            input: &input,
            tools: &tools,
            tool_choice: "auto",
            parallel_tool_calls: false,
            reasoning: None,
            store: false,
            stream: true,
            include: vec![],
            prompt_cache_key: None,
            text: Some(text_controls),
        };
        let v = serde_json::to_value(&req).expect("json");
        let text = v.get("text").expect("text field");
        assert!(text.get("verbosity").is_none());
        let format = text.get("format").expect("format field");
        assert_eq!(
            format.get("name"),
            Some(&serde_json::Value::String("codex_output_schema".into()))
        );
        assert_eq!(
            format.get("type"),
            Some(&serde_json::Value::String("json_schema".into()))
        );
        assert_eq!(format.get("strict"), Some(&serde_json::Value::Bool(true)));
        assert_eq!(format.get("schema"), Some(&schema));
    }

    // When `text` is `None` the field must be omitted from the payload.
    #[test]
    fn omits_text_when_not_set() {
        let input: Vec<ResponseItem> = vec![];
        let tools: Vec<serde_json::Value> = vec![];
        let req = ResponsesApiRequest {
            model: "gpt-5",
            instructions: "i",
            input: &input,
            tools: &tools,
            tool_choice: "auto",
            parallel_tool_calls: false,
            reasoning: None,
            store: false,
            stream: true,
            include: vec![],
            prompt_cache_key: None,
            text: None,
        };
        let v = serde_json::to_value(&req).expect("json");
        assert!(v.get("text").is_none());
    }
}

View File

@@ -0,0 +1,21 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
/// Timeout applied when the caller does not provide `timeout_ms`.
const DEFAULT_TIMEOUT_MS: u64 = 10_000;

/// Parameters describing a single subprocess invocation.
#[derive(Clone, Debug)]
pub struct ExecParams {
    pub command: Vec<String>,
    pub cwd: PathBuf,
    pub timeout_ms: Option<u64>,
    pub env: HashMap<String, String>,
    pub with_escalated_permissions: Option<bool>,
    pub justification: Option<String>,
}

impl ExecParams {
    /// Effective timeout: the explicit `timeout_ms` when set, otherwise
    /// `DEFAULT_TIMEOUT_MS` (10 seconds).
    pub fn timeout_duration(&self) -> Duration {
        self.timeout_ms
            .map_or(Duration::from_millis(DEFAULT_TIMEOUT_MS), Duration::from_millis)
    }
}

View File

@@ -1,10 +1,13 @@
pub mod apply_patch;
pub mod bash;
pub mod client_common;
pub mod command_safety;
pub mod config_types;
pub mod conversation_history;
pub mod exec;
pub mod exec_command;
pub mod function_tool;
pub mod model_client;
pub mod model_family;
pub mod model_provider;
pub mod notifications;
@@ -18,17 +21,22 @@ pub mod session_services;
pub mod session_state;
pub mod shell;
pub mod token_data;
pub mod tool_schema;
pub mod tooling;
pub mod tools_config;
pub mod truncate;
pub mod turn_diff_tracker;
pub mod unified_exec;
pub use apply_patch::*;
pub use bash::*;
pub use client_common::*;
pub use command_safety::*;
pub use config_types::*;
pub use conversation_history::*;
pub use exec::*;
pub use function_tool::*;
pub use model_client::*;
pub use model_family::*;
pub use model_provider::*;
pub use notifications::*;
@@ -42,7 +50,9 @@ pub use session_services::*;
pub use session_state::*;
pub use shell::*;
pub use token_data::*;
pub use tool_schema::*;
pub use tooling::*;
pub use tools_config::*;
pub use truncate::*;
pub use turn_diff_tracker::*;
pub use unified_exec::*;

View File

@@ -0,0 +1,34 @@
use std::sync::Arc;
use async_trait::async_trait;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::client_common::Prompt;
use crate::client_common::ResponseStream;
use crate::model_family::ModelFamily;
use crate::model_provider::ModelProviderInfo;
use crate::services::CredentialsProvider;
/// Abstraction over the concrete model client so the agent runtime is
/// decoupled from the transport implementation.
#[async_trait]
pub trait ModelClientAdapter: Send + Sync {
    /// Error type surfaced by [`ModelClientAdapter::stream`].
    type Error: std::error::Error + Send + Sync + 'static;
    /// Context window size in tokens, when known.
    fn get_model_context_window(&self) -> Option<u64>;
    /// Token threshold that triggers auto-compaction, when configured.
    fn get_auto_compact_token_limit(&self) -> Option<i64>;
    fn get_provider(&self) -> ModelProviderInfo;
    fn get_model(&self) -> String;
    fn get_model_family(&self) -> ModelFamily;
    fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig>;
    fn get_reasoning_summary(&self) -> ReasoningSummaryConfig;
    /// Credentials provider used to authenticate requests, if any.
    fn get_auth_manager(&self) -> Option<Arc<dyn CredentialsProvider>>;
    /// Start a streaming model turn for `prompt`.
    async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream<Self::Error>, Self::Error>;
}

View File

@@ -0,0 +1,5 @@
pub mod session;
pub use session::Session;
pub use session::TurnContext;
pub use session::ConfigureSession;

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +1,10 @@
pub mod planner;
pub mod types;
pub use planner::CommandPlanRequest;
pub use planner::ExecPlan;
pub use planner::PatchPlanRequest;
pub use planner::plan_apply_patch;
pub use planner::plan_exec;
pub use planner::should_escalate_on_failure;
pub use types::SandboxType;

View File

@@ -0,0 +1,106 @@
use std::collections::HashSet;
use std::path::Path;
use codex_apply_patch::ApplyPatchAction;
use codex_protocol::protocol::AskForApproval;
use codex_protocol::protocol::SandboxPolicy;
use crate::safety::SafetyCheck;
use crate::safety::assess_command_safety;
use crate::safety::assess_patch_safety;
use super::SandboxType;
/// Outcome of planning an exec or apply_patch request.
#[derive(Clone, Debug)]
pub enum ExecPlan {
    /// The request must not run; `reason` explains why.
    Reject {
        reason: String,
    },
    /// The user must be asked before running; `reason` carries the
    /// model-supplied justification, when one was given.
    AskUser {
        reason: Option<String>,
    },
    /// The request may run under `sandbox`.
    Approved {
        sandbox: SandboxType,
        /// Whether a sandboxed failure should be retried with escalation
        /// (see `should_escalate_on_failure`).
        on_failure_escalate: bool,
        /// Whether approval came from an explicit user decision rather than
        /// automatic policy.
        approved_by_user: bool,
    },
}

impl ExecPlan {
    /// Convenience constructor for the `Approved` variant.
    pub fn approved(
        sandbox: SandboxType,
        on_failure_escalate: bool,
        approved_by_user: bool,
    ) -> Self {
        ExecPlan::Approved {
            sandbox,
            on_failure_escalate,
            approved_by_user,
        }
    }
}
/// Inputs needed to plan execution of a shell command.
pub struct CommandPlanRequest<'a> {
    pub command: &'a [String],
    pub approval: AskForApproval,
    pub policy: &'a SandboxPolicy,
    /// Commands the user already approved earlier in this session.
    pub approved_session_commands: &'a HashSet<Vec<String>>,
    pub with_escalated_permissions: bool,
    /// Model-supplied justification, surfaced when asking the user.
    pub justification: Option<&'a String>,
}

/// Inputs needed to plan application of a patch.
pub struct PatchPlanRequest<'a> {
    pub action: &'a ApplyPatchAction,
    pub approval: AskForApproval,
    pub policy: &'a SandboxPolicy,
    pub cwd: &'a Path,
    /// When true the patch skips safety assessment and runs unsandboxed.
    pub user_explicitly_approved: bool,
}
/// Plan a shell-command execution by assessing its safety against the
/// current approval policy and sandbox policy.
pub fn plan_exec(req: &CommandPlanRequest<'_>) -> ExecPlan {
    match assess_command_safety(
        req.command,
        req.approval,
        req.policy,
        req.approved_session_commands,
        req.with_escalated_permissions,
    ) {
        SafetyCheck::AutoApprove { sandbox_type } => {
            let escalate = should_escalate_on_failure(req.approval, sandbox_type);
            ExecPlan::approved(sandbox_type, escalate, false)
        }
        SafetyCheck::AskUser => {
            // Surface the model's justification, if any, when prompting.
            let reason = req.justification.cloned();
            ExecPlan::AskUser { reason }
        }
        SafetyCheck::Reject { reason } => ExecPlan::Reject { reason },
    }
}
/// Plan application of a patch. An explicit prior user approval short-circuits
/// the safety assessment and runs without a sandbox.
pub fn plan_apply_patch(req: &PatchPlanRequest<'_>) -> ExecPlan {
    if req.user_explicitly_approved {
        return ExecPlan::approved(SandboxType::None, false, true);
    }
    let verdict = assess_patch_safety(req.action, req.approval, req.policy, req.cwd);
    match verdict {
        SafetyCheck::AutoApprove { sandbox_type } => {
            let escalate = should_escalate_on_failure(req.approval, sandbox_type);
            ExecPlan::approved(sandbox_type, escalate, false)
        }
        SafetyCheck::AskUser => ExecPlan::AskUser { reason: None },
        SafetyCheck::Reject { reason } => ExecPlan::Reject { reason },
    }
}
/// True when a failed sandboxed run should be retried with escalation:
/// the approval policy permits escalation (`UnlessTrusted` / `OnFailure`)
/// AND the command actually ran inside a sandbox that could have caused the
/// failure (`MacosSeatbelt` / `LinuxSeccomp`).
pub fn should_escalate_on_failure(approval: AskForApproval, sandbox: SandboxType) -> bool {
    let approval_allows_retry = matches!(
        approval,
        AskForApproval::UnlessTrusted | AskForApproval::OnFailure
    );
    let sandbox_may_have_blocked = matches!(
        sandbox,
        SandboxType::MacosSeatbelt | SandboxType::LinuxSeccomp
    );
    approval_allows_retry && sandbox_may_have_blocked
}

View File

@@ -0,0 +1,79 @@
use serde::Deserialize;
use serde::Serialize;
/// Function tool definition serialized into the Responses API `tools` array.
#[derive(Debug, Clone, Serialize, PartialEq)]
pub struct ResponsesApiTool {
    pub name: String,
    pub description: String,
    /// TODO: Validation. When strict is set to true, the JSON schema,
    /// `required` and `additional_properties` must be present. All fields in
    /// `properties` must be present in `required`.
    pub strict: bool,
    pub parameters: JsonSchema,
}

/// Custom ("freeform") tool definition with a grammar-style format.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FreeformTool {
    pub name: String,
    pub description: String,
    pub format: FreeformToolFormat,
}

/// Format descriptor for a [`FreeformTool`].
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FreeformToolFormat {
    pub r#type: String,
    pub syntax: String,
    pub definition: String,
}
/// Generic JSON-Schema subset needed for our tool definitions
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum JsonSchema {
    Boolean {
        #[serde(skip_serializing_if = "Option::is_none")]
        description: Option<String>,
    },
    String {
        #[serde(skip_serializing_if = "Option::is_none")]
        description: Option<String>,
    },
    /// MCP schema allows "number" | "integer" for Number
    #[serde(alias = "integer")]
    Number {
        #[serde(skip_serializing_if = "Option::is_none")]
        description: Option<String>,
    },
    Array {
        /// Schema applied to every element of the array.
        items: Box<JsonSchema>,
        #[serde(skip_serializing_if = "Option::is_none")]
        description: Option<String>,
    },
    Object {
        // BTreeMap keeps serialized property order deterministic.
        properties: std::collections::BTreeMap<String, JsonSchema>,
        #[serde(skip_serializing_if = "Option::is_none")]
        required: Option<Vec<String>>,
        #[serde(
            rename = "additionalProperties",
            skip_serializing_if = "Option::is_none"
        )]
        additional_properties: Option<bool>,
    },
}
/// When serialized as JSON, this produces a valid "Tool" in the OpenAI Responses API.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(tag = "type")]
pub enum OpenAiTool {
    /// Classic function tool with a JSON-schema parameter definition.
    #[serde(rename = "function")]
    Function(ResponsesApiTool),
    /// Built-in local shell tool; carries no payload.
    #[serde(rename = "local_shell")]
    LocalShell {},
    // TODO: Understand why we get an error on web_search although the API docs say it's supported.
    // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C
    #[serde(rename = "web_search")]
    WebSearch {},
    /// Custom ("freeform") tool with a grammar-based format.
    #[serde(rename = "custom")]
    Freeform(FreeformTool),
}

View File

@@ -0,0 +1,72 @@
use crate::tooling::ApplyPatchToolType;
use crate::model_family::ModelFamily;
/// Which shell tool variant is exposed to the model.
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
    Default,
    Local,
    Streamable,
}

/// Resolved set of tool toggles for a session, derived from
/// [`ToolsConfigParams`] by [`ToolsConfig::new`].
#[derive(Debug, Clone)]
pub struct ToolsConfig {
    pub shell_type: ConfigShellToolType,
    pub plan_tool: bool,
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    pub web_search_request: bool,
    pub include_view_image_tool: bool,
    pub experimental_unified_exec_tool: bool,
}

/// Raw inputs from which a [`ToolsConfig`] is derived.
pub struct ToolsConfigParams<'a> {
    pub model_family: &'a ModelFamily,
    pub include_plan_tool: bool,
    pub include_apply_patch_tool: bool,
    pub include_web_search_request: bool,
    pub use_streamable_shell_tool: bool,
    pub include_view_image_tool: bool,
    pub experimental_unified_exec_tool: bool,
}
impl ToolsConfig {
    /// Derive the session's tool configuration from `params`.
    ///
    /// Shell selection precedence: the explicit streamable-shell flag wins,
    /// then the model family's local-shell preference, then the default.
    pub fn new(params: &ToolsConfigParams) -> Self {
        let shell_type = if params.use_streamable_shell_tool {
            ConfigShellToolType::Streamable
        } else if params.model_family.uses_local_shell_tool {
            ConfigShellToolType::Local
        } else {
            ConfigShellToolType::Default
        };

        // The model family's own preference takes priority; the explicit
        // `include_apply_patch_tool` flag only applies when the family does
        // not state one.
        let apply_patch_tool_type = match params.model_family.apply_patch_tool_type {
            Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
            Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function),
            None if params.include_apply_patch_tool => Some(ApplyPatchToolType::Freeform),
            None => None,
        };

        Self {
            shell_type,
            plan_tool: params.include_plan_tool,
            apply_patch_tool_type,
            web_search_request: params.include_web_search_request,
            include_view_image_tool: params.include_view_image_tool,
            experimental_unified_exec_tool: params.experimental_unified_exec_tool,
        }
    }
}

View File

@@ -103,7 +103,7 @@ pub(crate) async fn stream_chat_completions(
for c in items {
match c {
ReasoningItemContent::ReasoningText { text: t }
| ReasoningItemContent::Text { text: t } => text.push_str(t),
| ReasoningItemContent::Text { text: t } => text.push_str(t.as_str()),
}
}
if text.trim().is_empty() {
@@ -158,7 +158,7 @@ pub(crate) async fn stream_chat_completions(
match c {
ContentItem::InputText { text: t }
| ContentItem::OutputText { text: t } => {
text.push_str(t);
text.push_str(t.as_str());
}
_ => {}
}

View File

@@ -1,371 +1,5 @@
use crate::error::Result;
use crate::model_family::ModelFamily;
use crate::openai_tools::OpenAiTool;
use crate::protocol::RateLimitSnapshot;
use crate::protocol::TokenUsage;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
use codex_protocol::config_types::Verbosity as VerbosityConfig;
use codex_protocol::models::ResponseItem;
use futures::Stream;
use serde::Serialize;
use serde_json::Value;
use std::borrow::Cow;
use std::ops::Deref;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::sync::mpsc;
pub use codex_agent::client_common::*;
/// Review thread system prompt. Edit `core/src/review_prompt.md` to customize.
pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md");
use crate::error::CodexErr;
/// API request payload for a single model turn
#[derive(Default, Debug, Clone)]
pub struct Prompt {
/// Conversation context input items.
pub input: Vec<ResponseItem>,
/// Tools available to the model, including additional tools sourced from
/// external MCP servers.
pub(crate) tools: Vec<OpenAiTool>,
/// Optional override for the built-in BASE_INSTRUCTIONS.
pub base_instructions_override: Option<String>,
/// Optional the output schema for the model's response.
pub output_schema: Option<Value>,
}
impl Prompt {
pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
let base = self
.base_instructions_override
.as_deref()
.unwrap_or(model.base_instructions.deref());
// When there are no custom instructions, add apply_patch_tool_instructions if:
// - the model needs special instructions (4.1)
// AND
// - there is no apply_patch tool present
let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool {
OpenAiTool::Function(f) => f.name == "apply_patch",
OpenAiTool::Freeform(f) => f.name == "apply_patch",
_ => false,
});
if self.base_instructions_override.is_none()
&& model.needs_special_apply_patch_instructions
&& !is_apply_patch_tool_present
{
Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}"))
} else {
Cow::Borrowed(base)
}
}
pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> {
self.input.clone()
}
}
#[derive(Debug)]
pub enum ResponseEvent {
Created,
OutputItemDone(ResponseItem),
Completed {
response_id: String,
token_usage: Option<TokenUsage>,
},
OutputTextDelta(String),
ReasoningSummaryDelta(String),
ReasoningContentDelta(String),
ReasoningSummaryPartAdded,
WebSearchCallBegin {
call_id: String,
},
RateLimits(RateLimitSnapshot),
}
#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) effort: Option<ReasoningEffortConfig>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<ReasoningSummaryConfig>,
}
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "snake_case")]
pub(crate) enum TextFormatType {
#[default]
JsonSchema,
}
#[derive(Debug, Serialize, Default, Clone)]
pub(crate) struct TextFormat {
pub(crate) r#type: TextFormatType,
pub(crate) strict: bool,
pub(crate) schema: Value,
pub(crate) name: String,
}
/// Controls under the `text` field in the Responses API for GPT-5.
#[derive(Debug, Serialize, Default, Clone)]
pub(crate) struct TextControls {
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) verbosity: Option<OpenAiVerbosity>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) format: Option<TextFormat>,
}
#[derive(Debug, Serialize, Default, Clone)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiVerbosity {
Low,
#[default]
Medium,
High,
}
impl From<VerbosityConfig> for OpenAiVerbosity {
fn from(v: VerbosityConfig) -> Self {
match v {
VerbosityConfig::Low => OpenAiVerbosity::Low,
VerbosityConfig::Medium => OpenAiVerbosity::Medium,
VerbosityConfig::High => OpenAiVerbosity::High,
}
}
}
/// Request object that is serialized as JSON and POST'ed when using the
/// Responses API.
#[derive(Debug, Serialize)]
pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) model: &'a str,
pub(crate) instructions: &'a str,
// TODO(mbolin): ResponseItem::Other should not be serialized. Currently,
// we code defensively to avoid this case, but perhaps we should use a
// separate enum for serialization.
pub(crate) input: &'a Vec<ResponseItem>,
pub(crate) tools: &'a [serde_json::Value],
pub(crate) tool_choice: &'static str,
pub(crate) parallel_tool_calls: bool,
pub(crate) reasoning: Option<Reasoning>,
pub(crate) store: bool,
pub(crate) stream: bool,
pub(crate) include: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) prompt_cache_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) text: Option<TextControls>,
}
pub(crate) fn create_reasoning_param_for_request(
model_family: &ModelFamily,
effort: Option<ReasoningEffortConfig>,
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
if !model_family.supports_reasoning_summaries {
return None;
}
Some(Reasoning {
effort,
summary: Some(summary),
})
}
pub(crate) fn create_text_param_for_request(
verbosity: Option<VerbosityConfig>,
output_schema: &Option<Value>,
) -> Option<TextControls> {
if verbosity.is_none() && output_schema.is_none() {
return None;
}
Some(TextControls {
verbosity: verbosity.map(std::convert::Into::into),
format: output_schema.as_ref().map(|schema| TextFormat {
r#type: TextFormatType::JsonSchema,
strict: true,
schema: schema.clone(),
name: "codex_output_schema".to_string(),
}),
})
}
pub struct ResponseStream {
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
}
impl Stream for ResponseStream {
type Item = Result<ResponseEvent>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.rx_event.poll_recv(cx)
}
}
#[cfg(test)]
mod tests {
use crate::model_family::find_family_for_model;
use pretty_assertions::assert_eq;
use super::*;
struct InstructionsTestCase {
pub slug: &'static str,
pub expects_apply_patch_instructions: bool,
}
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
..Default::default()
};
let test_cases = vec![
InstructionsTestCase {
slug: "gpt-3.5",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-4.1",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-4o",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-5",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "codex-mini-latest",
expects_apply_patch_instructions: true,
},
InstructionsTestCase {
slug: "gpt-oss:120b",
expects_apply_patch_instructions: false,
},
InstructionsTestCase {
slug: "gpt-5-codex",
expects_apply_patch_instructions: false,
},
];
for test_case in test_cases {
let model_family = find_family_for_model(test_case.slug).expect("known model slug");
let expected = if test_case.expects_apply_patch_instructions {
format!(
"{}\n{}",
model_family.clone().base_instructions,
APPLY_PATCH_TOOL_INSTRUCTIONS
)
} else {
model_family.clone().base_instructions
};
let full = prompt.get_full_instructions(&model_family);
assert_eq!(full, expected);
}
}
#[test]
fn serializes_text_verbosity_when_set() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: false,
stream: true,
include: vec![],
prompt_cache_key: None,
text: Some(TextControls {
verbosity: Some(OpenAiVerbosity::Low),
format: None,
}),
};
let v = serde_json::to_value(&req).expect("json");
assert_eq!(
v.get("text")
.and_then(|t| t.get("verbosity"))
.and_then(|s| s.as_str()),
Some("low")
);
}
#[test]
fn serializes_text_schema_with_strict_format() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let schema = serde_json::json!({
"type": "object",
"properties": {
"answer": {"type": "string"}
},
"required": ["answer"],
});
let text_controls =
create_text_param_for_request(None, &Some(schema.clone())).expect("text controls");
let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: false,
stream: true,
include: vec![],
prompt_cache_key: None,
text: Some(text_controls),
};
let v = serde_json::to_value(&req).expect("json");
let text = v.get("text").expect("text field");
assert!(text.get("verbosity").is_none());
let format = text.get("format").expect("format field");
assert_eq!(
format.get("name"),
Some(&serde_json::Value::String("codex_output_schema".into()))
);
assert_eq!(
format.get("type"),
Some(&serde_json::Value::String("json_schema".into()))
);
assert_eq!(format.get("strict"), Some(&serde_json::Value::Bool(true)));
assert_eq!(format.get("schema"), Some(&schema));
}
#[test]
fn omits_text_when_not_set() {
let input: Vec<ResponseItem> = vec![];
let tools: Vec<serde_json::Value> = vec![];
let req = ResponsesApiRequest {
model: "gpt-5",
instructions: "i",
input: &input,
tools: &tools,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: None,
store: false,
stream: true,
include: vec![],
prompt_cache_key: None,
text: None,
};
let v = serde_json::to_value(&req).expect("json");
assert!(v.get("text").is_none());
}
}
pub type ResponseStream = codex_agent::client_common::ResponseStream<CodexErr>;

View File

@@ -71,6 +71,7 @@ use crate::exec_command::WriteStdinParams;
use crate::exec_env::create_env;
use crate::mcp_connection_manager::McpConnectionManager;
use crate::mcp_tool_call::handle_mcp_tool_call;
use crate::model_client_adapter::CoreModelClientAdapter;
use crate::model_family::find_family_for_model;
use crate::model_provider_info::ModelProviderExt;
use crate::openai_model_info::get_model_info;
@@ -119,6 +120,7 @@ use crate::sandbox::BackendRegistry;
use crate::sandbox::ExecPlan;
use crate::sandbox::ExecRuntimeContext;
use crate::sandbox::PreparedExec;
use crate::sandbox::build_exec_params_for_apply_patch;
use crate::sandbox::prepare_exec_invocation;
use crate::sandbox::run_with_plan;
use crate::shell;
@@ -136,6 +138,7 @@ use codex_agent::apply_patch::ApplyPatchExec;
use codex_agent::apply_patch::InternalApplyPatchInvocation;
use codex_agent::apply_patch::apply_patch;
use codex_agent::apply_patch::convert_apply_patch_to_protocol;
use codex_agent::model_client::ModelClientAdapter;
use codex_agent::services::ApprovalCoordinator;
use codex_agent::session_services::SessionServices;
use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
@@ -246,14 +249,15 @@ impl Codex {
// Construct the model client and initial turn context before handing off to the runtime.
let credentials_provider: Option<Arc<dyn CredentialsProvider>> = Some(auth_manager.clone());
let client = ModelClient::new(
agent_config.clone(),
credentials_provider,
configure_session.provider.clone(),
configure_session.model_reasoning_effort,
configure_session.model_reasoning_summary,
conversation_id,
);
let client: Arc<dyn ModelClientAdapter<Error = CodexErr>> =
Arc::new(CoreModelClientAdapter::new(ModelClient::new(
agent_config.clone(),
credentials_provider,
configure_session.provider.clone(),
configure_session.model_reasoning_effort,
configure_session.model_reasoning_summary,
conversation_id,
)));
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &agent_config.model_family,
include_plan_tool: agent_config.include_plan_tool,
@@ -386,9 +390,8 @@ pub(crate) struct Session {
}
/// The context needed for a single turn of the conversation.
#[derive(Debug)]
pub(crate) struct TurnContext {
pub(crate) client: ModelClient,
pub(crate) client: Arc<dyn ModelClientAdapter<Error = CodexErr>>,
/// The session's current working directory. All relative paths provided by
/// the model as well as sandbox policies are resolved against this path
/// instead of `std::env::current_dir()`.
@@ -411,6 +414,22 @@ impl TurnContext {
}
}
impl std::fmt::Debug for TurnContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TurnContext")
.field("cwd", &self.cwd)
.field("base_instructions", &self.base_instructions)
.field("user_instructions", &self.user_instructions)
.field("approval_policy", &self.approval_policy)
.field("sandbox_policy", &self.sandbox_policy)
.field("shell_environment_policy", &self.shell_environment_policy)
.field("tools_config", &self.tools_config)
.field("is_review_mode", &self.is_review_mode)
.field("final_output_json_schema", &self.final_output_json_schema)
.finish()
}
}
/// Configure the model session.
struct ConfigureSession {
/// Provider identifier ("openai", "openrouter", ...).
@@ -1232,14 +1251,15 @@ async fn submission_loop(
updated_config.model_context_window = Some(model_info.context_window);
}
let client = ModelClient::new(
Arc::new(updated_config),
auth_manager,
provider,
effective_effort,
effective_summary,
sess.conversation_id,
);
let client: Arc<dyn ModelClientAdapter<Error = CodexErr>> =
Arc::new(CoreModelClientAdapter::new(ModelClient::new(
Arc::new(updated_config),
auth_manager,
provider,
effective_effort,
effective_summary,
sess.conversation_id,
)));
let new_approval_policy = approval_policy.unwrap_or(prev.approval_policy);
let new_sandbox_policy = sandbox_policy
@@ -1323,14 +1343,15 @@ async fn submission_loop(
// Build a new client with perturn reasoning settings.
// Reuse the same provider and session id; auth defaults to env/API key.
let client = ModelClient::new(
Arc::new(per_turn_config),
auth_manager,
provider,
effort,
summary,
sess.conversation_id,
);
let client: Arc<dyn ModelClientAdapter<Error = CodexErr>> =
Arc::new(CoreModelClientAdapter::new(ModelClient::new(
Arc::new(per_turn_config),
auth_manager,
provider,
effort,
summary,
sess.conversation_id,
)));
let fresh_turn_context = TurnContext {
client,
@@ -1584,14 +1605,15 @@ async fn spawn_review_thread(
}
let per_turn_config = Arc::new(per_turn_config);
let client = ModelClient::new(
per_turn_config.clone(),
auth_manager,
provider,
per_turn_config.model_reasoning_effort,
per_turn_config.model_reasoning_summary,
sess.conversation_id,
);
let client: Arc<dyn ModelClientAdapter<Error = CodexErr>> =
Arc::new(CoreModelClientAdapter::new(ModelClient::new(
per_turn_config.clone(),
auth_manager,
provider,
per_turn_config.model_reasoning_effort,
per_turn_config.model_reasoning_summary,
sess.conversation_id,
)));
let review_turn_context = TurnContext {
client,
@@ -2673,6 +2695,8 @@ async fn handle_container_exec_with_params(
)));
}
let mut params = params;
// check if this was a patch, and apply it if so
let apply_patch_exec = match maybe_parse_apply_patch_verified(&params.command, &params.cwd) {
MaybeApplyPatchVerified::Body(changes) => {
@@ -2685,6 +2709,7 @@ async fn handle_container_exec_with_params(
match apply_patch(sess, apply_patch_context, &sub_id, &call_id, changes).await {
InternalApplyPatchInvocation::Output(item) => return item,
InternalApplyPatchInvocation::DelegateToExec(apply_patch_exec) => {
params = build_exec_params_for_apply_patch(&apply_patch_exec, &params)?;
Some(apply_patch_exec)
}
}
@@ -2711,7 +2736,9 @@ async fn handle_container_exec_with_params(
let prepared = prepare_exec_invocation(
sess,
turn_context,
turn_context.approval_policy,
&turn_context.sandbox_policy,
&turn_context.cwd,
&sub_id,
&call_id,
params,
@@ -3560,14 +3587,15 @@ mod tests {
let config = Arc::new(config);
let agent_config = Arc::new(AgentConfig::from(config.as_ref()));
let conversation_id = ConversationId::default();
let client = ModelClient::new(
agent_config.clone(),
None,
agent_config.model_provider.clone(),
agent_config.model_reasoning_effort,
agent_config.model_reasoning_summary,
conversation_id,
);
let client: Arc<dyn ModelClientAdapter<Error = CodexErr>> =
Arc::new(CoreModelClientAdapter::new(ModelClient::new(
agent_config.clone(),
None,
agent_config.model_provider.clone(),
agent_config.model_reasoning_effort,
agent_config.model_reasoning_summary,
conversation_id,
)));
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_family: &agent_config.model_family,
include_plan_tool: agent_config.include_plan_tool,

View File

@@ -1,7 +1,6 @@
#[cfg(unix)]
use std::os::unix::process::ExitStatusExt;
use std::collections::HashMap;
use std::io;
use std::path::Path;
use std::path::PathBuf;
@@ -27,10 +26,9 @@ use crate::protocol::SandboxPolicy;
use crate::seatbelt::spawn_command_under_seatbelt;
use crate::spawn::StdioPolicy;
use crate::spawn::spawn_child_async;
pub use codex_agent::exec::ExecParams;
pub use codex_agent::sandbox::SandboxType;
const DEFAULT_TIMEOUT_MS: u64 = 10_000;
// Hardcode these since it does not seem worth including the libc crate just
// for these.
const SIGKILL_CODE: i32 = 9;
@@ -46,22 +44,6 @@ const AGGREGATE_BUFFER_INITIAL_CAPACITY: usize = 8 * 1024; // 8 KiB
/// Aggregation still collects full output; only the live event stream is capped.
pub(crate) const MAX_EXEC_OUTPUT_DELTAS_PER_CALL: usize = 10_000;
#[derive(Clone, Debug)]
pub struct ExecParams {
pub command: Vec<String>,
pub cwd: PathBuf,
pub timeout_ms: Option<u64>,
pub env: HashMap<String, String>,
pub with_escalated_permissions: Option<bool>,
pub justification: Option<String>,
}
impl ExecParams {
pub fn timeout_duration(&self) -> Duration {
Duration::from_millis(self.timeout_ms.unwrap_or(DEFAULT_TIMEOUT_MS))
}
}
#[derive(Clone)]
pub struct StdoutStream {
pub sub_id: String,

View File

@@ -37,11 +37,13 @@ pub mod landlock;
mod mcp_connection_manager;
mod mcp_tool_call;
mod message_history;
mod model_client_adapter;
mod model_provider_info;
pub mod parse_command;
mod truncate;
mod unified_exec;
mod user_instructions;
pub use model_client_adapter::CoreModelClientAdapter;
pub use model_provider_info::BUILT_IN_OSS_MODEL_PROVIDER_ID;
pub use model_provider_info::built_in_model_providers;
pub use model_provider_info::create_oss_provider_with_base_url;

View File

@@ -0,0 +1,70 @@
use std::sync::Arc;
use async_trait::async_trait;
use crate::client::ModelClient;
use crate::client_common::Prompt;
use crate::error::CodexErr;
use crate::model_family::ModelFamily;
use codex_agent::model_client::ModelClientAdapter;
use codex_agent::model_provider::ModelProviderInfo;
use codex_agent::services::CredentialsProvider;
#[derive(Clone)]
pub struct CoreModelClientAdapter {
inner: ModelClient,
}
impl CoreModelClientAdapter {
pub fn new(inner: ModelClient) -> Self {
Self { inner }
}
pub fn inner(&self) -> &ModelClient {
&self.inner
}
}
#[async_trait]
impl ModelClientAdapter for CoreModelClientAdapter {
type Error = CodexErr;
fn get_model_context_window(&self) -> Option<u64> {
self.inner.get_model_context_window()
}
fn get_auto_compact_token_limit(&self) -> Option<i64> {
self.inner.get_auto_compact_token_limit()
}
fn get_provider(&self) -> ModelProviderInfo {
self.inner.get_provider()
}
fn get_model(&self) -> String {
self.inner.get_model()
}
fn get_model_family(&self) -> ModelFamily {
self.inner.get_model_family()
}
fn get_reasoning_effort(&self) -> Option<codex_protocol::config_types::ReasoningEffort> {
self.inner.get_reasoning_effort()
}
fn get_reasoning_summary(&self) -> codex_protocol::config_types::ReasoningSummary {
self.inner.get_reasoning_summary()
}
fn get_auth_manager(&self) -> Option<Arc<dyn CredentialsProvider>> {
self.inner.get_auth_manager()
}
async fn stream(
&self,
prompt: &Prompt,
) -> Result<codex_agent::client_common::ResponseStream<Self::Error>, Self::Error> {
self.inner.stream(prompt).await
}
}

View File

@@ -10,153 +10,14 @@ use crate::plan_tool::PLAN_TOOL;
use crate::tool_apply_patch::ApplyPatchToolType;
use crate::tool_apply_patch::create_apply_patch_freeform_tool;
use crate::tool_apply_patch::create_apply_patch_json_tool;
#[derive(Debug, Clone, Serialize, PartialEq)]
pub struct ResponsesApiTool {
pub(crate) name: String,
pub(crate) description: String,
/// TODO: Validation. When strict is set to true, the JSON schema,
/// `required` and `additional_properties` must be present. All fields in
/// `properties` must be present in `required`.
pub(crate) strict: bool,
pub(crate) parameters: JsonSchema,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FreeformTool {
pub(crate) name: String,
pub(crate) description: String,
pub(crate) format: FreeformToolFormat,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FreeformToolFormat {
pub(crate) r#type: String,
pub(crate) syntax: String,
pub(crate) definition: String,
}
/// When serialized as JSON, this produces a valid "Tool" in the OpenAI
/// Responses API.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(tag = "type")]
pub(crate) enum OpenAiTool {
#[serde(rename = "function")]
Function(ResponsesApiTool),
#[serde(rename = "local_shell")]
LocalShell {},
// TODO: Understand why we get an error on web_search although the API docs say it's supported.
// https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#:~:text=%7B%20type%3A%20%22web_search%22%20%7D%2C
#[serde(rename = "web_search")]
WebSearch {},
#[serde(rename = "custom")]
Freeform(FreeformTool),
}
#[derive(Debug, Clone)]
pub enum ConfigShellToolType {
Default,
Local,
Streamable,
}
#[derive(Debug, Clone)]
pub(crate) struct ToolsConfig {
pub shell_type: ConfigShellToolType,
pub plan_tool: bool,
pub apply_patch_tool_type: Option<ApplyPatchToolType>,
pub web_search_request: bool,
pub include_view_image_tool: bool,
pub experimental_unified_exec_tool: bool,
}
pub(crate) struct ToolsConfigParams<'a> {
pub(crate) model_family: &'a ModelFamily,
pub(crate) include_plan_tool: bool,
pub(crate) include_apply_patch_tool: bool,
pub(crate) include_web_search_request: bool,
pub(crate) use_streamable_shell_tool: bool,
pub(crate) include_view_image_tool: bool,
pub(crate) experimental_unified_exec_tool: bool,
}
impl ToolsConfig {
pub fn new(params: &ToolsConfigParams) -> Self {
let ToolsConfigParams {
model_family,
include_plan_tool,
include_apply_patch_tool,
include_web_search_request,
use_streamable_shell_tool,
include_view_image_tool,
experimental_unified_exec_tool,
} = params;
let shell_type = if *use_streamable_shell_tool {
ConfigShellToolType::Streamable
} else if model_family.uses_local_shell_tool {
ConfigShellToolType::Local
} else {
ConfigShellToolType::Default
};
let apply_patch_tool_type = match model_family.apply_patch_tool_type {
Some(ApplyPatchToolType::Freeform) => Some(ApplyPatchToolType::Freeform),
Some(ApplyPatchToolType::Function) => Some(ApplyPatchToolType::Function),
None => {
if *include_apply_patch_tool {
Some(ApplyPatchToolType::Freeform)
} else {
None
}
}
};
Self {
shell_type,
plan_tool: *include_plan_tool,
apply_patch_tool_type,
web_search_request: *include_web_search_request,
include_view_image_tool: *include_view_image_tool,
experimental_unified_exec_tool: *experimental_unified_exec_tool,
}
}
}
/// Generic JSONSchema subset needed for our tool definitions
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub(crate) enum JsonSchema {
Boolean {
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
String {
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
/// MCP schema allows "number" | "integer" for Number
#[serde(alias = "integer")]
Number {
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
Array {
items: Box<JsonSchema>,
#[serde(skip_serializing_if = "Option::is_none")]
description: Option<String>,
},
Object {
properties: BTreeMap<String, JsonSchema>,
#[serde(skip_serializing_if = "Option::is_none")]
required: Option<Vec<String>>,
#[serde(
rename = "additionalProperties",
skip_serializing_if = "Option::is_none"
)]
additional_properties: Option<bool>,
},
}
pub(crate) use codex_agent::tool_schema::FreeformTool;
pub(crate) use codex_agent::tool_schema::FreeformToolFormat;
pub(crate) use codex_agent::tool_schema::JsonSchema;
pub(crate) use codex_agent::tool_schema::OpenAiTool;
pub(crate) use codex_agent::tool_schema::ResponsesApiTool;
pub(crate) use codex_agent::tools_config::ConfigShellToolType;
pub(crate) use codex_agent::tools_config::ToolsConfig;
pub(crate) use codex_agent::tools_config::ToolsConfigParams;
fn create_unified_exec_tool() -> OpenAiTool {
let mut properties = BTreeMap::new();

View File

@@ -2,17 +2,14 @@ mod apply_patch_adapter;
mod backend;
mod planner;
pub(crate) use apply_patch_adapter::build_exec_params_for_apply_patch;
pub use backend::BackendRegistry;
pub use backend::DirectBackend;
pub use backend::LinuxBackend;
pub use backend::SeatbeltBackend;
pub use backend::SpawnBackend;
pub use planner::ExecPlan;
pub use planner::ExecRequest;
pub use planner::PatchExecRequest;
pub use codex_agent::sandbox::ExecPlan;
pub(crate) use planner::PreparedExec;
pub use planner::plan_apply_patch;
pub use planner::plan_exec;
pub(crate) use planner::prepare_exec_invocation;
use crate::error::Result;

View File

@@ -2,111 +2,20 @@ use std::collections::HashSet;
use std::path::Path;
use codex_agent::apply_patch::ApplyPatchExec;
use codex_agent::safety::SafetyCheck;
use codex_agent::safety::assess_command_safety;
use codex_agent::safety::assess_patch_safety;
use codex_agent::sandbox::CommandPlanRequest;
use codex_agent::sandbox::ExecPlan;
use codex_agent::sandbox::PatchPlanRequest;
use codex_agent::sandbox::SandboxType;
use codex_agent::sandbox::plan_apply_patch;
use codex_agent::sandbox::plan_exec;
use codex_agent::services::ApprovalCoordinator;
use codex_apply_patch::ApplyPatchAction;
use super::apply_patch_adapter::build_exec_params_for_apply_patch;
use crate::codex::TurnContext;
use crate::exec::ExecParams;
use crate::function_tool::FunctionCallError;
use crate::protocol::AskForApproval;
use crate::protocol::ReviewDecision;
use crate::protocol::SandboxPolicy;
#[derive(Clone, Debug)]
pub struct ExecRequest<'a> {
pub params: &'a ExecParams,
pub approval: AskForApproval,
pub policy: &'a SandboxPolicy,
pub approved_session_commands: &'a HashSet<Vec<String>>,
}
#[derive(Clone, Debug)]
pub enum ExecPlan {
Reject {
reason: String,
},
AskUser {
reason: Option<String>,
},
Approved {
sandbox: SandboxType,
on_failure_escalate: bool,
approved_by_user: bool,
},
}
impl ExecPlan {
pub fn approved(
sandbox: SandboxType,
on_failure_escalate: bool,
approved_by_user: bool,
) -> Self {
ExecPlan::Approved {
sandbox,
on_failure_escalate,
approved_by_user,
}
}
}
pub fn plan_exec(req: &ExecRequest<'_>) -> ExecPlan {
let params = req.params;
let with_escalated_permissions = params.with_escalated_permissions.unwrap_or(false);
let safety = assess_command_safety(
&params.command,
req.approval,
req.policy,
req.approved_session_commands,
with_escalated_permissions,
);
match safety {
SafetyCheck::AutoApprove { sandbox_type } => ExecPlan::Approved {
sandbox: sandbox_type,
on_failure_escalate: should_escalate_on_failure(req.approval, sandbox_type),
approved_by_user: false,
},
SafetyCheck::AskUser => ExecPlan::AskUser {
reason: params.justification.clone(),
},
SafetyCheck::Reject { reason } => ExecPlan::Reject { reason },
}
}
#[derive(Clone, Debug)]
pub struct PatchExecRequest<'a> {
pub action: &'a ApplyPatchAction,
pub approval: AskForApproval,
pub policy: &'a SandboxPolicy,
pub cwd: &'a Path,
pub user_explicitly_approved: bool,
}
pub fn plan_apply_patch(req: &PatchExecRequest<'_>) -> ExecPlan {
if req.user_explicitly_approved {
ExecPlan::Approved {
sandbox: SandboxType::None,
on_failure_escalate: false,
approved_by_user: true,
}
} else {
match assess_patch_safety(req.action, req.approval, req.policy, req.cwd) {
SafetyCheck::AutoApprove { sandbox_type } => ExecPlan::Approved {
sandbox: sandbox_type,
on_failure_escalate: should_escalate_on_failure(req.approval, sandbox_type),
approved_by_user: false,
},
SafetyCheck::AskUser => ExecPlan::AskUser { reason: None },
SafetyCheck::Reject { reason } => ExecPlan::Reject { reason },
}
}
}
#[derive(Debug)]
pub(crate) struct PreparedExec {
pub(crate) params: ExecParams,
@@ -117,28 +26,31 @@ pub(crate) struct PreparedExec {
pub(crate) async fn prepare_exec_invocation(
approvals: &dyn ApprovalCoordinator,
turn_context: &TurnContext,
approval_policy: AskForApproval,
sandbox_policy: &SandboxPolicy,
cwd: &Path,
sub_id: &str,
call_id: &str,
params: ExecParams,
apply_patch_exec: Option<ApplyPatchExec>,
approved_session_commands: HashSet<Vec<String>>,
) -> Result<PreparedExec, FunctionCallError> {
let mut params = params;
let command_for_display = if let Some(exec) = apply_patch_exec.as_ref() {
vec!["apply_patch".to_string(), exec.action.patch.clone()]
} else {
params.command.clone()
};
let (plan, command_for_display) = if let Some(exec) = apply_patch_exec.as_ref() {
params = build_exec_params_for_apply_patch(exec, &params)?;
let command_for_display = vec!["apply_patch".to_string(), exec.action.patch.clone()];
let plan_req = PatchExecRequest {
let plan = if let Some(exec) = apply_patch_exec.as_ref() {
let plan_req = PatchPlanRequest {
action: &exec.action,
approval: turn_context.approval_policy,
policy: &turn_context.sandbox_policy,
cwd: &turn_context.cwd,
approval: approval_policy,
policy: sandbox_policy,
cwd,
user_explicitly_approved: exec.user_explicitly_approved_this_action,
};
let plan = match plan_apply_patch(&plan_req) {
match plan_apply_patch(&plan_req) {
plan @ ExecPlan::Approved { .. } => plan,
ExecPlan::AskUser { .. } => {
return Err(FunctionCallError::RespondToModel(
@@ -150,20 +62,18 @@ pub(crate) async fn prepare_exec_invocation(
"patch rejected: {reason}"
)));
}
}
} else {
let plan_req = CommandPlanRequest {
command: &params.command,
approval: approval_policy,
policy: sandbox_policy,
approved_session_commands: &approved_session_commands,
with_escalated_permissions: params.with_escalated_permissions.unwrap_or(false),
justification: params.justification.as_ref(),
};
(plan, command_for_display)
} else {
let command_for_display = params.command.clone();
let initial_plan = plan_exec(&ExecRequest {
params: &params,
approval: turn_context.approval_policy,
policy: &turn_context.sandbox_policy,
approved_session_commands: &approved_session_commands,
});
let plan = match initial_plan {
match plan_exec(&plan_req) {
plan @ ExecPlan::Approved { .. } => plan,
ExecPlan::AskUser { reason } => {
let decision = approvals
@@ -175,6 +85,7 @@ pub(crate) async fn prepare_exec_invocation(
reason,
)
.await;
match decision {
ReviewDecision::Approved => ExecPlan::approved(SandboxType::None, false, true),
ReviewDecision::ApprovedForSession => {
@@ -193,9 +104,7 @@ pub(crate) async fn prepare_exec_invocation(
"exec command rejected: {reason:?}"
)));
}
};
(plan, command_for_display)
}
};
Ok(PreparedExec {
@@ -205,13 +114,3 @@ pub(crate) async fn prepare_exec_invocation(
apply_patch_exec,
})
}
fn should_escalate_on_failure(approval: AskForApproval, sandbox: SandboxType) -> bool {
matches!(
(approval, sandbox),
(
AskForApproval::UnlessTrusted | AskForApproval::OnFailure,
SandboxType::MacosSeatbelt | SandboxType::LinuxSeccomp
)
)
}