Compare commits

...

2 Commits

Author SHA1 Message Date
daniel-oai
e0a980c641 Merge branch 'main' into canvrno/windows_prompt 2026-01-29 11:33:10 -08:00
canvrno-oai
2bbffa2bb0 Added env-specific string replacements for shell usage examples in prompt 2026-01-28 18:55:49 -08:00
5 changed files with 102 additions and 30 deletions

View File

@@ -2,7 +2,7 @@ You are Codex, based on GPT-5. You are running as a coding agent in the Codex CL
## General
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
{{SHELL_COMMANDS_SEARCH_BULLET}}
## Editing constraints

View File

@@ -281,7 +281,7 @@ For casual greetings, acknowledgements, or other one-off conversational messages
When using the shell, you must adhere to the following guidelines:
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
{{SHELL_COMMANDS_SEARCH_BULLET}}
- Do not use python scripts to attempt to output larger chunks of a file.
## apply_patch

View File

@@ -247,9 +247,9 @@ For casual greetings, acknowledgements, or other one-off conversational messages
When using the shell, you must adhere to the following guidelines:
- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)
{{SHELL_COMMANDS_SEARCH_BULLET}}
- Do not use python scripts to attempt to output larger chunks of a file.
- Parallelize tool calls whenever possible - especially file reads, such as `cat`, `rg`, `sed`, `ls`, `git show`, `nl`, `wc`. Use `multi_tool_use.parallel` to parallelize tool calls and only this.
{{SHELL_COMMANDS_PARALLELIZE_BULLET}}
## apply_patch

View File

@@ -260,6 +260,62 @@ fn maybe_push_chat_wire_api_deprecation(
});
}
// Shell usage example consts to provide shell-specific prompting on command usage. Prevents model from attempting to execute incorrect commands for the platform.
// Placeholder tokens that appear verbatim in the base-instruction templates;
// they are replaced at runtime with the shell-appropriate bullet below.
const SHELL_COMMANDS_SEARCH_BULLET_PLACEHOLDER: &str = "{{SHELL_COMMANDS_SEARCH_BULLET}}";
const SHELL_COMMANDS_PARALLELIZE_BULLET_PLACEHOLDER: &str = "{{SHELL_COMMANDS_PARALLELIZE_BULLET}}";
// PowerShell: steer the model toward Get-ChildItem / Select-String.
const SHELL_COMMANDS_SEARCH_BULLET_POWERSHELL: &str = "- When searching for text or files in PowerShell, prefer using `Get-ChildItem -Recurse | Select-String -Pattern` for text search and `Get-ChildItem -Recurse -File` for file listing.";
const SHELL_COMMANDS_PARALLELIZE_BULLET_POWERSHELL: &str = "- Parallelize tool calls whenever possible - especially file reads, such as `Get-ChildItem`, `Select-String`, `Get-Content`, `git show`, `nl`, `wc`. Use `multi_tool_use.parallel` to parallelize tool calls and only this.";
// cmd.exe: steer the model toward findstr / dir.
const SHELL_COMMANDS_SEARCH_BULLET_CMD: &str = "- When searching for text or files in cmd, prefer using `findstr /S /N /I` for text search and `dir /s /b` for file listing.";
const SHELL_COMMANDS_PARALLELIZE_BULLET_CMD: &str = "- Parallelize tool calls whenever possible - especially file reads, such as `dir /s /b`, `findstr /S /N /I`, `type`, `git show`, `nl`, `wc`. Use `multi_tool_use.parallel` to parallelize tool calls and only this.";
// POSIX-style shells (bash/zsh/sh): keep the original rg-based guidance.
const SHELL_COMMANDS_SEARCH_BULLET_BASH_ZSH: &str = "- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.)";
const SHELL_COMMANDS_PARALLELIZE_BULLET_BASH_ZSH: &str = "- Parallelize tool calls whenever possible - especially file reads, such as `cat`, `rg`, `sed`, `ls`, `git show`, `nl`, `wc`. Use `multi_tool_use.parallel` to parallelize tool calls and only this.";
fn render_base_instructions(base_instructions: String) -> String {
let shell_type = shell::default_user_shell().shell_type;
let (search_bullet, parallelize_bullet) = match shell_type {
shell::ShellType::PowerShell => (
SHELL_COMMANDS_SEARCH_BULLET_POWERSHELL,
SHELL_COMMANDS_PARALLELIZE_BULLET_POWERSHELL,
),
shell::ShellType::Cmd => (
SHELL_COMMANDS_SEARCH_BULLET_CMD,
SHELL_COMMANDS_PARALLELIZE_BULLET_CMD,
),
shell::ShellType::Zsh | shell::ShellType::Bash | shell::ShellType::Sh => (
SHELL_COMMANDS_SEARCH_BULLET_BASH_ZSH,
SHELL_COMMANDS_PARALLELIZE_BULLET_BASH_ZSH,
),
};
base_instructions
.replace(SHELL_COMMANDS_SEARCH_BULLET_PLACEHOLDER, search_bullet)
.replace(
SHELL_COMMANDS_PARALLELIZE_BULLET_PLACEHOLDER,
parallelize_bullet,
)
}
/// Resolves the base instructions for a session and renders the env-aware
/// shell command usage tips into them.
///
/// Selection priority:
/// 1. `config.base_instructions` override
/// 2. the base instructions recorded in `conversation_history`, if any
/// 3. the model's own instructions for the effective personality
///
/// The `personality` argument, when `Some`, takes precedence over
/// `config.model_personality`.
pub(crate) fn resolve_base_instructions(
    config: &Config,
    conversation_history: Option<&InitialHistory>,
    model_info: &ModelInfo,
    personality: Option<Personality>,
) -> String {
    let effective_personality = personality.or(config.model_personality);

    // Lazily pull base instructions out of the conversation history; only
    // consulted when there is no config-level override.
    let history_instructions = || {
        conversation_history
            .and_then(|history| history.get_base_instructions())
            .map(|s| s.text)
    };

    let raw = match config.base_instructions.clone() {
        Some(overridden) => overridden,
        None => history_instructions()
            .unwrap_or_else(|| model_info.get_model_instructions(effective_personality)),
    };

    render_base_instructions(raw)
}
impl Codex {
/// Spawn a new [`Codex`] and initialize the session.
#[allow(clippy::too_many_arguments)]
@@ -317,13 +373,14 @@ impl Codex {
// Resolve base instructions for the session. Priority order:
// 1. config.base_instructions override
// 2. conversation history => session_meta.base_instructions
// 3. base_intructions for current model
// 3. base_instructions for current model
let model_info = models_manager.get_model_info(model.as_str(), &config).await;
let base_instructions = config
.base_instructions
.clone()
.or_else(|| conversation_history.get_base_instructions().map(|s| s.text))
.unwrap_or_else(|| model_info.get_model_instructions(config.model_personality));
let base_instructions = resolve_base_instructions(
config.as_ref(),
Some(&conversation_history),
&model_info,
config.model_personality,
);
// TODO (aibrahim): Consolidate config.model and config.model_reasoning_effort into config.collaboration_mode
// to avoid extracting these fields separately and constructing CollaborationMode here.
@@ -4055,14 +4112,15 @@ mod tests {
);
}
let resolved_base_instructions =
resolve_base_instructions(&config, None, &model_info, config.model_personality);
{
let mut state = session.state.lock().await;
state.session_configuration.base_instructions =
model_info.base_instructions.clone();
state.session_configuration.base_instructions = resolved_base_instructions.clone();
}
let base_instructions = session.get_base_instructions().await;
assert_eq!(base_instructions.text, model_info.base_instructions);
assert_eq!(base_instructions.text, resolved_base_instructions);
}
}
@@ -4402,10 +4460,12 @@ mod tests {
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
personality: config.model_personality,
base_instructions: config
.base_instructions
.clone()
.unwrap_or_else(|| model_info.get_model_instructions(config.model_personality)),
base_instructions: resolve_base_instructions(
config.as_ref(),
None,
&model_info,
config.model_personality,
),
compact_prompt: config.compact_prompt.clone(),
approval_policy: config.approval_policy.clone(),
sandbox_policy: config.sandbox_policy.clone(),
@@ -4483,10 +4543,12 @@ mod tests {
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
personality: config.model_personality,
base_instructions: config
.base_instructions
.clone()
.unwrap_or_else(|| model_info.get_model_instructions(config.model_personality)),
base_instructions: resolve_base_instructions(
config.as_ref(),
None,
&model_info,
config.model_personality,
),
compact_prompt: config.compact_prompt.clone(),
approval_policy: config.approval_policy.clone(),
sandbox_policy: config.sandbox_policy.clone(),
@@ -4748,10 +4810,12 @@ mod tests {
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
personality: config.model_personality,
base_instructions: config
.base_instructions
.clone()
.unwrap_or_else(|| model_info.get_model_instructions(config.model_personality)),
base_instructions: resolve_base_instructions(
config.as_ref(),
None,
&model_info,
config.model_personality,
),
compact_prompt: config.compact_prompt.clone(),
approval_policy: config.approval_policy.clone(),
sandbox_policy: config.sandbox_policy.clone(),
@@ -4862,10 +4926,12 @@ mod tests {
developer_instructions: config.developer_instructions.clone(),
user_instructions: config.user_instructions.clone(),
personality: config.model_personality,
base_instructions: config
.base_instructions
.clone()
.unwrap_or_else(|| model_info.get_model_instructions(config.model_personality)),
base_instructions: resolve_base_instructions(
config.as_ref(),
None,
&model_info,
config.model_personality,
),
compact_prompt: config.compact_prompt.clone(),
approval_policy: config.approval_policy.clone(),
sandbox_policy: config.sandbox_policy.clone(),

View File

@@ -1,4 +1,5 @@
use crate::codex::TurnContext;
use crate::codex::resolve_base_instructions;
use crate::context_manager::normalize;
use crate::instructions::SkillInstructions;
use crate::instructions::UserInstructions;
@@ -89,7 +90,12 @@ impl ContextManager {
let personality = turn_context
.personality
.or(turn_context.client.config().model_personality);
let base_instructions = model_info.get_model_instructions(personality);
let base_instructions = resolve_base_instructions(
turn_context.client.config().as_ref(),
None,
&model_info,
personality,
);
let base_tokens = i64::try_from(approx_token_count(&base_instructions)).unwrap_or(i64::MAX);
let items_tokens = self.items.iter().fold(0i64, |acc, item| {