Compare commits


9 Commits

Author   SHA1        Message                                                              Date
jif-oai  39ffc2f0b8  Fix merge                                                            2025-12-04 14:44:24 +00:00
jif-oai  860e48f210  Merge remote-tracking branch 'origin/main' into jif/prompt-cleaning  2025-12-04 14:41:16 +00:00
                     # Conflicts:
                     #   codex-rs/core/src/client.rs
                     #   codex-rs/core/src/client_common.rs
jif-oai  c7ea34483d  Merge branch 'main' into jif/prompt-cleaning                         2025-12-03 09:54:31 +00:00
jif-oai  70810cb547  More                                                                 2025-12-02 10:09:37 +00:00
jif-oai  37138e64a7  move push_input to tests                                             2025-12-02 10:08:29 +00:00
jif-oai  e21b053207  Useless import                                                       2025-12-01 11:19:21 +00:00
jif-oai  da534afe3d  Cleaning for compaction                                              2025-12-01 11:17:09 +00:00
jif-oai  cac05eddc1  Add a validation                                                     2025-12-01 10:50:16 +00:00
jif-oai  ca8f63f771  chore: add PromptBuilder                                             2025-12-01 10:33:37 +00:00
14 changed files with 270 additions and 133 deletions

View File

@@ -1,6 +1,6 @@
use crate::ChatRequest;
use crate::Prompt;
use crate::auth::AuthProvider;
use crate::common::Prompt as ApiPrompt;
use crate::common::ResponseEvent;
use crate::common::ResponseStream;
use crate::endpoint::streaming::StreamingClient;
@@ -52,7 +52,7 @@ impl<T: HttpTransport, A: AuthProvider> ChatClient<T, A> {
pub async fn stream_prompt(
&self,
model: &str,
prompt: &ApiPrompt,
prompt: &Prompt,
conversation_id: Option<String>,
session_source: Option<SessionSource>,
) -> Result<ResponseStream, ApiError> {
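These first hunks switch the API clients to consuming the wire-level `codex_api::Prompt` directly instead of re-aliasing it as `ApiPrompt`. For orientation, a sketch of that struct's shape as implied by the `build_api_prompt` adapter removed later in this diff — field types are inferred from the call sites, not confirmed against `codex-api`:

// Sketch only: codex_api::Prompt as implied by build_api_prompt and
// PromptBuilder::build in this diff; not the authoritative definition.
pub struct Prompt {
    pub instructions: String,                    // full system instructions
    pub input: Vec<ResponseItem>,                // formatted conversation items
    pub tools: Vec<serde_json::Value>,           // wire-encoded tool specs
    pub parallel_tool_calls: bool,
    pub output_schema: Option<serde_json::Value>,
}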

View File

@@ -45,7 +45,7 @@ impl<T: HttpTransport, A: AuthProvider> CompactClient<T, A> {
}
}
pub async fn compact(
async fn compact(
&self,
body: serde_json::Value,
extra_headers: HeaderMap,

View File

@@ -1,5 +1,5 @@
use crate::Prompt;
use crate::auth::AuthProvider;
use crate::common::Prompt as ApiPrompt;
use crate::common::Reasoning;
use crate::common::ResponseStream;
use crate::common::TextControls;
@@ -50,17 +50,14 @@ impl<T: HttpTransport, A: AuthProvider> ResponsesClient<T, A> {
}
}
pub async fn stream_request(
&self,
request: ResponsesRequest,
) -> Result<ResponseStream, ApiError> {
async fn stream_request(&self, request: ResponsesRequest) -> Result<ResponseStream, ApiError> {
self.stream(request.body, request.headers).await
}
pub async fn stream_prompt(
&self,
model: &str,
prompt: &ApiPrompt,
prompt: &Prompt,
options: ResponsesOptions,
) -> Result<ResponseStream, ApiError> {
let ResponsesOptions {
@@ -95,6 +92,7 @@ impl<T: HttpTransport, A: AuthProvider> ResponsesClient<T, A> {
}
}
// Public mainly for testing purposes.
pub async fn stream(
&self,
body: Value,
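With `stream_request` now private, tests reach the raw path through `stream`, which stays public for that purpose. A minimal sketch of such a call, assuming `stream` takes the JSON body plus a header map as the `stream_request` delegation above suggests — `client` and the body contents are hypothetical:

// Hypothetical test usage; the body fields are illustrative only.
use http::HeaderMap;
use serde_json::json;

let body = json!({ "model": "test-model", "input": [] });
let response_stream = client.stream(body, HeaderMap::new()).await?;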

View File

@@ -5,8 +5,6 @@ use crate::api_bridge::map_api_error;
use codex_api::AggregateStreamExt;
use codex_api::ChatClient as ApiChatClient;
use codex_api::CompactClient as ApiCompactClient;
use codex_api::CompactionInput as ApiCompactionInput;
use codex_api::Prompt as ApiPrompt;
use codex_api::RequestTelemetry;
use codex_api::ReqwestTransport;
use codex_api::ResponseStream as ApiResponseStream;
@@ -31,14 +29,13 @@ use http::HeaderMap as ApiHeaderMap;
use http::HeaderValue;
use http::StatusCode as HttpStatusCode;
use reqwest::StatusCode;
use serde_json::Value;
use std::time::Duration;
use tokio::sync::mpsc;
use tracing::warn;
use crate::AuthManager;
use crate::auth::RefreshTokenError;
use crate::client_common::Prompt;
use crate::client_common::PromptBuilder;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::config::Config;
@@ -50,8 +47,6 @@ use crate::model_provider_info::ModelProviderInfo;
use crate::model_provider_info::WireApi;
use crate::openai_model_info::get_model_info;
use crate::openai_models::model_family::ModelFamily;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;
#[derive(Debug, Clone)]
pub struct ModelClient {
@@ -116,7 +111,9 @@ impl ModelClient {
///
/// For Chat providers, the underlying stream is optionally aggregated
/// based on the `show_raw_agent_reasoning` flag in the config.
pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> {
pub async fn stream(&self, prompt: &PromptBuilder) -> Result<ResponseStream> {
self.validate_prompt_wire_api(prompt)?;
match self.provider.wire_api {
WireApi::Responses => self.stream_responses_api(prompt).await,
WireApi::Chat => {
@@ -137,23 +134,24 @@ impl ModelClient {
}
}
fn validate_prompt_wire_api(&self, prompt: &PromptBuilder) -> Result<()> {
if prompt.wire_api != self.provider.wire_api {
return Err(CodexErr::UnsupportedOperation(format!(
"prompt configured for {:?} wire API but provider {} expects {:?}",
prompt.wire_api, self.provider.name, self.provider.wire_api
)));
}
Ok(())
}
/// Streams a turn via the OpenAI Chat Completions API.
///
/// This path is only used when the provider is configured with
/// `WireApi::Chat`; it does not support `output_schema` today.
async fn stream_chat_completions(&self, prompt: &Prompt) -> Result<ApiResponseStream> {
if prompt.output_schema.is_some() {
return Err(CodexErr::UnsupportedOperation(
"output_schema is not supported for Chat Completions API".to_string(),
));
}
async fn stream_chat_completions(&self, prompt: &PromptBuilder) -> Result<ApiResponseStream> {
let auth_manager = self.auth_manager.clone();
let instructions = prompt
.get_full_instructions(&self.config.model_family)
.into_owned();
let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?;
let api_prompt = build_api_prompt(prompt, instructions, tools_json);
let api_prompt = prompt.build(&self.config.model_family)?;
let conversation_id = self.conversation_id.to_string();
let session_source = self.session_source.clone();
@@ -195,7 +193,7 @@ impl ModelClient {
///
/// Handles SSE fixtures, reasoning summaries, verbosity, and the
/// `text` controls used for output schemas.
async fn stream_responses_api(&self, prompt: &Prompt) -> Result<ResponseStream> {
async fn stream_responses_api(&self, prompt: &PromptBuilder) -> Result<ResponseStream> {
if let Some(path) = &*CODEX_RS_SSE_FIXTURE {
warn!(path, "Streaming from fixture");
let stream = codex_api::stream_from_fixture(path, self.provider.stream_idle_timeout())
@@ -204,10 +202,6 @@ impl ModelClient {
}
let auth_manager = self.auth_manager.clone();
let instructions = prompt
.get_full_instructions(&self.config.model_family)
.into_owned();
let tools_json: Vec<Value> = create_tools_json_for_responses_api(&prompt.tools)?;
let reasoning = if self.config.model_family.supports_reasoning_summaries {
Some(Reasoning {
@@ -241,7 +235,7 @@ impl ModelClient {
};
let text = create_text_param_for_request(verbosity, &prompt.output_schema);
let api_prompt = build_api_prompt(prompt, instructions.clone(), tools_json);
let api_prompt = prompt.build(&self.config.model_family)?;
let conversation_id = self.conversation_id.to_string();
let session_source = self.session_source.clone();
@@ -326,8 +320,12 @@ impl ModelClient {
///
/// This is a unary call (no streaming) that returns a new list of
/// `ResponseItem`s representing the compacted transcript.
pub async fn compact_conversation_history(&self, prompt: &Prompt) -> Result<Vec<ResponseItem>> {
if prompt.input.is_empty() {
pub async fn compact_conversation_history(
&self,
input: &[ResponseItem],
instructions: &str,
) -> Result<Vec<ResponseItem>> {
if input.is_empty() {
return Ok(Vec::new());
}
let auth_manager = self.auth_manager.clone();
@@ -341,13 +339,10 @@ impl ModelClient {
let client = ApiCompactClient::new(transport, api_provider, api_auth)
.with_telemetry(Some(request_telemetry));
let instructions = prompt
.get_full_instructions(&self.config.model_family)
.into_owned();
let payload = ApiCompactionInput {
let payload = codex_api::CompactionInput {
model: &self.config.model,
input: &prompt.input,
instructions: &instructions,
input,
instructions,
};
let mut extra_headers = ApiHeaderMap::new();
@@ -389,17 +384,6 @@ impl ModelClient {
}
}
/// Adapts the core `Prompt` type into the `codex-api` payload shape.
fn build_api_prompt(prompt: &Prompt, instructions: String, tools_json: Vec<Value>) -> ApiPrompt {
ApiPrompt {
instructions,
input: prompt.get_formatted_input(),
tools: tools_json,
parallel_tool_calls: prompt.parallel_tool_calls,
output_schema: prompt.output_schema.clone(),
}
}
fn map_response_stream<S>(api_stream: S, otel_event_manager: OtelEventManager) -> ResponseStream
where
S: futures::Stream<Item = std::result::Result<ResponseEvent, ApiError>>
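The upshot for callers: a `PromptBuilder`'s wire API must agree with the provider's, or `validate_prompt_wire_api` rejects the turn before any request is sent. A minimal sketch of the new calling convention, assuming `client` and `items` are in scope:

// Keep the prompt's wire API in sync with the provider; a mismatch
// now fails fast with CodexErr::UnsupportedOperation.
let prompt = PromptBuilder::new()
    .wire_api(client.get_provider().wire_api)
    .with_input(items);
let mut stream = client.stream(&prompt).await?;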

View File

@@ -1,6 +1,11 @@
use crate::client_common::tools::ToolSpec;
use crate::error::CodexErr;
use crate::error::Result;
use crate::model_provider_info::WireApi;
use crate::openai_models::model_family::ModelFamily;
use crate::tools::spec::create_tools_json_for_chat_completions_api;
use crate::tools::spec::create_tools_json_for_responses_api;
use codex_api::Prompt;
pub use codex_api::common::ResponseEvent;
use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
use codex_protocol::models::ResponseItem;
@@ -23,12 +28,15 @@ pub const REVIEW_EXIT_SUCCESS_TMPL: &str = include_str!("../templates/review/exi
pub const REVIEW_EXIT_INTERRUPTED_TMPL: &str =
include_str!("../templates/review/exit_interrupted.xml");
/// API request payload for a single model turn
/// Builder for a single model turn.
#[derive(Default, Debug, Clone)]
pub struct Prompt {
pub struct PromptBuilder {
/// Conversation context input items.
pub input: Vec<ResponseItem>,
/// Target wire API for this prompt.
pub wire_api: WireApi,
/// Tools available to the model, including additional tools sourced from
/// external MCP servers.
pub(crate) tools: Vec<ToolSpec>,
@@ -43,7 +51,108 @@ pub struct Prompt {
pub output_schema: Option<Value>,
}
impl Prompt {
impl PromptBuilder {
pub fn new() -> Self {
Self {
wire_api: WireApi::Responses,
..Self::default()
}
}
pub fn with_input(mut self, input: Vec<ResponseItem>) -> Self {
self.input = input;
self
}
/// For integration tests only.
pub fn push_input(&mut self, item: ResponseItem) {
self.input.push(item);
}
pub(crate) fn with_tools(mut self, tools: Vec<ToolSpec>) -> Self {
self.tools = tools;
self
}
pub fn with_parallel_tool_calls(mut self, enabled: bool) -> Self {
self.parallel_tool_calls = enabled;
self
}
pub fn with_base_instructions_override<S: Into<String>>(mut self, instructions: S) -> Self {
self.base_instructions_override = Some(instructions.into());
self
}
pub fn with_base_instructions_override_opt(mut self, instructions: Option<String>) -> Self {
self.base_instructions_override = instructions;
self
}
pub fn with_output_schema(mut self, schema: Value) -> Self {
self.output_schema = Some(schema);
self
}
pub fn with_output_schema_opt(mut self, schema: Option<Value>) -> Self {
self.output_schema = schema;
self
}
pub fn clear_output_schema(mut self) -> Self {
self.output_schema = None;
self
}
pub fn with_wire_api(self, wire_api: WireApi) -> Self {
self.wire_api(wire_api)
}
pub fn wire_api(mut self, wire_api: WireApi) -> Self {
self.wire_api = wire_api;
self
}
pub fn chat(mut self) -> Self {
self.wire_api = WireApi::Chat;
self
}
pub fn responses(mut self) -> Self {
self.wire_api = WireApi::Responses;
self
}
/// Builds a wire-level `codex_api::Prompt` for the configured `wire_api`.
///
/// - For `WireApi::Responses`, tools are encoded using the Responses
/// function-calling shape.
/// - For `WireApi::Chat`, tools are encoded using the Chat Completions
/// function-calling shape, and `output_schema` is not supported.
pub fn build(&self, model: &ModelFamily) -> Result<Prompt> {
if matches!(self.wire_api, WireApi::Chat) && self.output_schema.is_some() {
return Err(CodexErr::UnsupportedOperation(
"output_schema is not supported for Chat Completions API".to_string(),
));
}
let instructions = self.get_full_instructions(model).into_owned();
let input = self.get_formatted_input();
let tools_json = match self.wire_api {
WireApi::Responses => create_tools_json_for_responses_api(&self.tools)?,
WireApi::Chat => create_tools_json_for_chat_completions_api(&self.tools)?,
};
Ok(Prompt {
instructions,
input,
tools: tools_json,
parallel_tool_calls: self.parallel_tool_calls,
output_schema: self.output_schema.clone(),
})
}
pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> {
let base = self
.base_instructions_override
@@ -267,7 +376,7 @@ mod tests {
}
#[test]
fn get_full_instructions_no_user_content() {
let prompt = Prompt {
let prompt = PromptBuilder {
..Default::default()
};
let test_cases = vec![
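One `build` behavior worth highlighting: combining the Chat wire API with an `output_schema` is rejected at build time, mirroring the check that previously lived in `stream_chat_completions`. A minimal sketch, assuming a `model_family` value is in scope:

// Per PromptBuilder::build: Chat + output_schema is rejected up front.
let result = PromptBuilder::new()
    .chat()
    .with_output_schema(serde_json::json!({ "type": "object" }))
    .build(&model_family);
assert!(matches!(result, Err(CodexErr::UnsupportedOperation(_))));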

View File

@@ -63,7 +63,7 @@ use tracing::warn;
use crate::ModelProviderInfo;
use crate::client::ModelClient;
use crate::client_common::Prompt;
use crate::client_common::PromptBuilder;
use crate::client_common::ResponseEvent;
use crate::compact::collect_user_messages;
use crate::config::Config;
@@ -2157,13 +2157,13 @@ async fn run_turn(
new_instructions.push_str(INSTRUCTIONS);
base_instructions = Some(new_instructions);
}
let prompt = Prompt {
input,
tools: router.specs(),
parallel_tool_calls,
base_instructions_override: base_instructions,
output_schema: turn_context.final_output_json_schema.clone(),
};
let prompt = PromptBuilder::new()
.wire_api(turn_context.client.get_provider().wire_api)
.with_input(input)
.with_tools(router.specs())
.with_parallel_tool_calls(parallel_tool_calls)
.with_base_instructions_override_opt(base_instructions)
.with_output_schema_opt(turn_context.final_output_json_schema.clone());
let mut retries = 0;
loop {
@@ -2252,7 +2252,7 @@ async fn try_run_turn(
sess: Arc<Session>,
turn_context: Arc<TurnContext>,
turn_diff_tracker: SharedTurnDiffTracker,
prompt: &Prompt,
prompt: &PromptBuilder,
cancellation_token: CancellationToken,
) -> CodexResult<Vec<ProcessedResponseItem>> {
let rollout_item = RolloutItem::TurnContext(TurnContextItem {

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use crate::Prompt;
use crate::client_common::PromptBuilder;
use crate::client_common::ResponseEvent;
use crate::codex::Session;
use crate::codex::TurnContext;
@@ -93,10 +93,9 @@ async fn run_compact_task_inner(
loop {
let turn_input = history.get_history_for_prompt();
let prompt = Prompt {
input: turn_input.clone(),
..Default::default()
};
let prompt = PromptBuilder::new()
.wire_api(turn_context.client.get_provider().wire_api)
.with_input(turn_input.clone());
let attempt_result = drain_to_completed(&sess, turn_context.as_ref(), &prompt).await;
match attempt_result {
@@ -290,7 +289,7 @@ fn build_compacted_history_with_limit(
async fn drain_to_completed(
sess: &Session,
turn_context: &TurnContext,
prompt: &Prompt,
prompt: &PromptBuilder,
) -> CodexResult<()> {
let mut stream = turn_context.client.clone().stream(prompt).await?;
loop {

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use crate::Prompt;
use crate::PromptBuilder;
use crate::codex::Session;
use crate::codex::TurnContext;
use crate::error::Result as CodexResult;
@@ -41,17 +41,14 @@ async fn run_remote_compact_task_inner_impl(
turn_context: &Arc<TurnContext>,
) -> CodexResult<()> {
let mut history = sess.clone_history().await;
let prompt = Prompt {
input: history.get_history_for_prompt(),
tools: vec![],
parallel_tool_calls: false,
base_instructions_override: turn_context.base_instructions.clone(),
output_schema: None,
};
let prompt = PromptBuilder::new()
.with_input(history.get_history_for_prompt())
.with_base_instructions_override_opt(turn_context.base_instructions.clone())
.build(&turn_context.client.get_model_family())?;
let mut new_history = turn_context
.client
.compact_conversation_history(&prompt)
.compact_conversation_history(&prompt.input, &prompt.instructions)
.await?;
// Required to keep `/undo` available after compaction
let ghost_snapshots: Vec<ResponseItem> = history

View File

@@ -108,7 +108,7 @@ pub use codex_protocol::protocol;
pub use codex_protocol::config_types as protocol_config_types;
pub use client::ModelClient;
pub use client_common::Prompt;
pub use client_common::PromptBuilder;
pub use client_common::REVIEW_PROMPT;
pub use client_common::ResponseEvent;
pub use client_common::ResponseStream;

View File

@@ -7,7 +7,7 @@ use std::time::Instant;
use crate::AuthManager;
use crate::ModelProviderInfo;
use crate::client::ModelClient;
use crate::client_common::Prompt;
use crate::client_common::PromptBuilder;
use crate::client_common::ResponseEvent;
use crate::config::Config;
use crate::protocol::SandboxPolicy;
@@ -112,17 +112,15 @@ pub(crate) async fn assess_command(
.trim()
.to_string();
let prompt = Prompt {
input: vec![ResponseItem::Message {
let prompt = PromptBuilder::new()
.wire_api(provider.wire_api)
.with_input(vec![ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText { text: user_prompt }],
}],
tools: Vec::new(),
parallel_tool_calls: false,
base_instructions_override: Some(system_prompt),
output_schema: Some(sandbox_assessment_schema()),
};
}])
.with_base_instructions_override(system_prompt)
.with_output_schema(sandbox_assessment_schema());
let child_otel =
parent_otel.with_model(config.model.as_str(), config.model_family.slug.as_str());

View File

@@ -7,9 +7,10 @@ use codex_core::LocalShellExecAction;
use codex_core::LocalShellStatus;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::PromptBuilder;
use codex_core::ResponseItem;
use codex_core::WireApi;
use codex_core::error::CodexErr;
use codex_otel::otel_event_manager::OtelEventManager;
use codex_protocol::ConversationId;
use codex_protocol::models::ReasoningItemContent;
@@ -41,21 +42,7 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
.mount(&server)
.await;
let provider = ModelProviderInfo {
name: "mock".into(),
base_url: Some(format!("{}/v1", server.uri())),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Chat,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
};
let provider = chat_provider(format!("{}/v1", server.uri()));
let codex_home = match TempDir::new() {
Ok(dir) => dir,
@@ -93,8 +80,7 @@ async fn run_request(input: Vec<ResponseItem>) -> Value {
codex_protocol::protocol::SessionSource::Exec,
);
let mut prompt = Prompt::default();
prompt.input = input;
let prompt = PromptBuilder::new().chat().with_input(input);
let mut stream = match client.stream(&prompt).await {
Ok(s) => s,
@@ -185,6 +171,24 @@ fn first_assistant(messages: &[Value]) -> &Value {
}
}
fn chat_provider(base_url: String) -> ModelProviderInfo {
ModelProviderInfo {
name: "mock".into(),
base_url: Some(base_url),
env_key: None,
env_key_instructions: None,
experimental_bearer_token: None,
wire_api: WireApi::Chat,
query_params: None,
http_headers: None,
env_http_headers: None,
request_max_retries: Some(0),
stream_max_retries: Some(0),
stream_idle_timeout_ms: Some(5_000),
requires_openai_auth: false,
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn omits_reasoning_when_none_present() {
skip_if_no_network!();
@@ -317,3 +321,52 @@ async fn suppresses_duplicate_assistant_messages() {
Value::String("dup".into())
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn errors_on_mismatched_prompt_wire_api() {
let provider = chat_provider("https://example.com/v1".into());
let codex_home = match TempDir::new() {
Ok(dir) => dir,
Err(e) => panic!("failed to create TempDir: {e}"),
};
let mut config = load_default_config_for_test(&codex_home);
config.model_provider_id = provider.name.clone();
config.model_provider = provider.clone();
config.show_raw_agent_reasoning = true;
let effort = config.model_reasoning_effort;
let summary = config.model_reasoning_summary;
let config = Arc::new(config);
let conversation_id = ConversationId::new();
let otel_event_manager = OtelEventManager::new(
conversation_id,
config.model.as_str(),
config.model_family.slug.as_str(),
None,
Some("test@test.com".to_string()),
Some(AuthMode::ChatGPT),
false,
"test".to_string(),
);
let client = ModelClient::new(
Arc::clone(&config),
None,
otel_event_manager,
provider,
effort,
summary,
conversation_id,
codex_protocol::protocol::SessionSource::Exec,
);
let prompt = PromptBuilder::new().with_input(vec![user_message("u1")]);
let err = match client.stream(&prompt).await {
Ok(_) => panic!("wire API mismatch should error before sending request"),
Err(e) => e,
};
assert!(matches!(err, CodexErr::UnsupportedOperation(_)));
}

View File

@@ -6,7 +6,7 @@ use codex_app_server_protocol::AuthMode;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::PromptBuilder;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
@@ -93,14 +93,15 @@ async fn run_stream_with_bytes(sse_body: &[u8]) -> Vec<ResponseEvent> {
codex_protocol::protocol::SessionSource::Exec,
);
let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "hello".to_string(),
}],
}];
let prompt = PromptBuilder::new()
.chat()
.with_input(vec![ResponseItem::Message {
id: None,
role: "user".to_string(),
content: vec![ContentItem::InputText {
text: "hello".to_string(),
}],
}]);
let mut stream = match client.stream(&prompt).await {
Ok(s) => s,

View File

@@ -4,7 +4,7 @@ use codex_app_server_protocol::AuthMode;
use codex_core::ContentItem;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::Prompt;
use codex_core::PromptBuilder;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
@@ -82,14 +82,13 @@ async fn responses_stream_includes_subagent_header_on_review() {
SessionSource::SubAgent(codex_protocol::protocol::SubAgentSource::Review),
);
let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
let prompt = PromptBuilder::new().with_input(vec![ResponseItem::Message {
id: None,
role: "user".into(),
content: vec![ContentItem::InputText {
text: "hello".into(),
}],
}];
}]);
let mut stream = client.stream(&prompt).await.expect("stream failed");
while let Some(event) = stream.next().await {
@@ -172,14 +171,13 @@ async fn responses_stream_includes_subagent_header_on_other() {
)),
);
let mut prompt = Prompt::default();
prompt.input = vec![ResponseItem::Message {
let prompt = PromptBuilder::new().with_input(vec![ResponseItem::Message {
id: None,
role: "user".into(),
content: vec![ContentItem::InputText {
text: "hello".into(),
}],
}];
}]);
let mut stream = client.stream(&prompt).await.expect("stream failed");
while let Some(event) = stream.next().await {

View File

@@ -8,7 +8,7 @@ use codex_core::LocalShellStatus;
use codex_core::ModelClient;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::Prompt;
use codex_core::PromptBuilder;
use codex_core::ResponseEvent;
use codex_core::ResponseItem;
use codex_core::WireApi;
@@ -1040,8 +1040,8 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
codex_protocol::protocol::SessionSource::Exec,
);
let mut prompt = Prompt::default();
prompt.input.push(ResponseItem::Reasoning {
let mut prompt = PromptBuilder::new();
prompt.push_input(ResponseItem::Reasoning {
id: "reasoning-id".into(),
summary: vec![ReasoningItemReasoningSummary::SummaryText {
text: "summary".into(),
@@ -1051,27 +1051,27 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
}]),
encrypted_content: None,
});
prompt.input.push(ResponseItem::Message {
prompt.push_input(ResponseItem::Message {
id: Some("message-id".into()),
role: "assistant".into(),
content: vec![ContentItem::OutputText {
text: "message".into(),
}],
});
prompt.input.push(ResponseItem::WebSearchCall {
prompt.push_input(ResponseItem::WebSearchCall {
id: Some("web-search-id".into()),
status: Some("completed".into()),
action: WebSearchAction::Search {
query: Some("weather".into()),
},
});
prompt.input.push(ResponseItem::FunctionCall {
prompt.push_input(ResponseItem::FunctionCall {
id: Some("function-id".into()),
name: "do_thing".into(),
arguments: "{}".into(),
call_id: "function-call-id".into(),
});
prompt.input.push(ResponseItem::LocalShellCall {
prompt.push_input(ResponseItem::LocalShellCall {
id: Some("local-shell-id".into()),
call_id: Some("local-shell-call-id".into()),
status: LocalShellStatus::Completed,
@@ -1083,7 +1083,7 @@ async fn azure_responses_request_includes_store_and_reasoning_ids() {
user: None,
}),
});
prompt.input.push(ResponseItem::CustomToolCall {
prompt.push_input(ResponseItem::CustomToolCall {
id: Some("custom-tool-id".into()),
status: Some("completed".into()),
call_id: "custom-tool-call-id".into(),