Compare commits

...

3 Commits

Author SHA1 Message Date
Michael Bolin
67a769daf1 bazel: add explicit rust test shard labels
Generate separate Bazel test labels for selected large Rust test targets so BuildBuddy can report timing and flakiness per shard. Keep the original aggregate target names as test_suites over the generated shard targets.

For integration tests, compile one manual *-all-test-bin rust_test and make each shard label a lightweight wrapper around that binary. This preserves distinct BuildBuddy labels without compiling the same test crate once per shard.

Patch the pinned rules_rust archive with the stable name-hash sharding, explicit RULES_RUST_TEST_* env support, Windows manifest fallback, Windows-safe PowerShell UInt32 masking, and isolated Windows shard temp files from hermeticbuild/rules_rust#14 until Codex can bump to a merged rules_rust commit that contains it.

Co-authored-by: Codex <noreply@openai.com>
2026-04-15 22:09:52 -07:00
Matthew Zeng
224dad41ac [codex][mcp] Add resource uri meta to tool call item. (#17831)
- [x] Add resource uri meta to tool call item so that the app-server
client can start prefetching resources immediately without loading mcp
server status.
2026-04-16 05:09:17 +00:00
Matthew Zeng
77fe33bf72 Update ToolSearch to be enabled by default (#17854)
## Summary
- Promote `Feature::ToolSearch` to `Stable` and enable it in the default
feature set
- Update feature tests and tool registry coverage to match the new
default
- Adjust the search-tool integration test to assert the default-on path
and explicit disable fallback

## Testing
- `just fmt`
- `cargo test -p codex-features`
- `cargo test -p codex-core --test all search_tool`
- `cargo test -p codex-tools`
2026-04-15 22:01:05 -07:00
49 changed files with 1142 additions and 232 deletions

View File

@@ -37,6 +37,8 @@ alias(
exports_files([
"AGENTS.md",
"test_binary_test_launcher.bat.tpl",
"test_binary_test_launcher.sh.tpl",
"workspace_root_test_launcher.bat.tpl",
"workspace_root_test_launcher.sh.tpl",
])

View File

@@ -94,10 +94,20 @@ single_version_override(
)
rules_rust = use_extension("@rules_rs//rs/experimental:rules_rust.bzl", "rules_rust")
# Build-script probe binaries inherit CFLAGS/CXXFLAGS from Bazel's C++
# toolchain. On `windows-gnullvm`, llvm-mingw does not ship
# `libssp_nonshared`, so strip the forwarded stack-protector flags there.
rules_rust.patch(
bazel_dep(name = "rules_rust", version = "0.69.0")
# `rules_rs` 0.0.43 pins an older hermeticbuild/rules_rust commit. Override the
# extension-created repo so Codex can use rust_test sharding from #14 without
# updating the entire rules_rs module.
#
# Build-script probe binaries inherit CFLAGS/CXXFLAGS from Bazel's C++ toolchain.
# On `windows-gnullvm`, llvm-mingw does not ship `libssp_nonshared`, so strip the
# forwarded stack-protector flags there.
archive_override(
module_name = "rules_rust",
integrity = "sha256-q37yA5WS3++LIgdkThIhxsQkoZ5F/+DD4GhTWkY1rkE=",
patch_strip = 1,
patches = [
"//patches:rules_rust_windows_gnullvm_build_script.patch",
"//patches:rules_rust_windows_exec_msvc_build_script_env.patch",
@@ -105,14 +115,19 @@ rules_rust.patch(
"//patches:rules_rust_windows_build_script_runner_paths.patch",
"//patches:rules_rust_windows_msvc_direct_link_args.patch",
"//patches:rules_rust_windows_process_wrapper_skip_temp_outputs.patch",
"//patches:rules_rust_stable_explicit_test_shards.patch",
"//patches:rules_rust_windows_exec_bin_target.patch",
"//patches:rules_rust_windows_exec_std.patch",
"//patches:rules_rust_windows_exec_rustc_dev_rlib.patch",
"//patches:rules_rust_repository_set_exec_constraints.patch",
],
strip = 1,
strip_prefix = "rules_rust-10825e1a40b8f6d979c51bc795c9d3fa7f78cc6f",
urls = ["https://github.com/hermeticbuild/rules_rust/archive/10825e1a40b8f6d979c51bc795c9d3fa7f78cc6f.tar.gz"],
)
override_repo(
rules_rust,
rules_rust = "rules_rust",
)
use_repo(rules_rust, "rules_rust")
nightly_rust = use_extension(
"@rules_rs//rs/experimental:rules_rust_reexported_extensions.bzl",

100
MODULE.bazel.lock generated
View File

@@ -579,6 +579,106 @@
}
}
}
},
"@@rules_rust+//crate_universe/private:internal_extensions.bzl%cu_nr": {
"general": {
"bzlTransitiveDigest": "Z9J2ln1eSy/cLWVaYkKgLO0MKnGph0v9SLVH0brHj+E=",
"usagesDigest": "qhmy4zrnFb6knNQRx2XBCVfHObom0vnflm+sibd7SA0=",
"recordedInputs": [
"REPO_MAPPING:bazel_features+,bazel_features_globals bazel_features++version_extension+bazel_features_globals",
"REPO_MAPPING:bazel_features+,bazel_features_version bazel_features++version_extension+bazel_features_version",
"REPO_MAPPING:rules_cc+,bazel_skylib bazel_skylib+",
"REPO_MAPPING:rules_cc+,bazel_tools bazel_tools",
"REPO_MAPPING:rules_cc+,cc_compatibility_proxy rules_cc++compatibility_proxy+cc_compatibility_proxy",
"REPO_MAPPING:rules_cc+,platforms platforms",
"REPO_MAPPING:rules_cc+,rules_cc rules_cc+",
"REPO_MAPPING:rules_cc++compatibility_proxy+cc_compatibility_proxy,rules_cc rules_cc+",
"REPO_MAPPING:rules_rust+,bazel_features bazel_features+",
"REPO_MAPPING:rules_rust+,bazel_skylib bazel_skylib+",
"REPO_MAPPING:rules_rust+,bazel_tools bazel_tools",
"REPO_MAPPING:rules_rust+,cargo_bazel_bootstrap rules_rust++cu_nr+cargo_bazel_bootstrap",
"REPO_MAPPING:rules_rust+,cui rules_rust++cu+cui",
"REPO_MAPPING:rules_rust+,rrc rules_rust++i2+rrc",
"REPO_MAPPING:rules_rust+,rules_cc rules_cc+",
"REPO_MAPPING:rules_rust+,rules_rust rules_rust+"
],
"generatedRepoSpecs": {
"cargo_bazel_bootstrap": {
"repoRuleId": "@@rules_rust+//cargo/private:cargo_bootstrap.bzl%cargo_bootstrap_repository",
"attributes": {
"srcs": [
"@@rules_rust+//crate_universe:src/api.rs",
"@@rules_rust+//crate_universe:src/api/lockfile.rs",
"@@rules_rust+//crate_universe:src/cli.rs",
"@@rules_rust+//crate_universe:src/cli/generate.rs",
"@@rules_rust+//crate_universe:src/cli/query.rs",
"@@rules_rust+//crate_universe:src/cli/render.rs",
"@@rules_rust+//crate_universe:src/cli/splice.rs",
"@@rules_rust+//crate_universe:src/cli/vendor.rs",
"@@rules_rust+//crate_universe:src/config.rs",
"@@rules_rust+//crate_universe:src/context.rs",
"@@rules_rust+//crate_universe:src/context/crate_context.rs",
"@@rules_rust+//crate_universe:src/context/platforms.rs",
"@@rules_rust+//crate_universe:src/lib.rs",
"@@rules_rust+//crate_universe:src/lockfile.rs",
"@@rules_rust+//crate_universe:src/main.rs",
"@@rules_rust+//crate_universe:src/metadata.rs",
"@@rules_rust+//crate_universe:src/metadata/cargo_bin.rs",
"@@rules_rust+//crate_universe:src/metadata/cargo_tree_resolver.rs",
"@@rules_rust+//crate_universe:src/metadata/cargo_tree_rustc_wrapper.bat",
"@@rules_rust+//crate_universe:src/metadata/cargo_tree_rustc_wrapper.sh",
"@@rules_rust+//crate_universe:src/metadata/dependency.rs",
"@@rules_rust+//crate_universe:src/metadata/metadata_annotation.rs",
"@@rules_rust+//crate_universe:src/rendering.rs",
"@@rules_rust+//crate_universe:src/rendering/template_engine.rs",
"@@rules_rust+//crate_universe:src/rendering/templates/module_bzl.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/partials/header.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/partials/module/aliases_map.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/partials/module/deps_map.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/partials/module/repo_git.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/partials/module/repo_http.j2",
"@@rules_rust+//crate_universe:src/rendering/templates/vendor_module.j2",
"@@rules_rust+//crate_universe:src/rendering/verbatim/alias_rules.bzl",
"@@rules_rust+//crate_universe:src/select.rs",
"@@rules_rust+//crate_universe:src/splicing.rs",
"@@rules_rust+//crate_universe:src/splicing/cargo_config.rs",
"@@rules_rust+//crate_universe:src/splicing/crate_index_lookup.rs",
"@@rules_rust+//crate_universe:src/splicing/splicer.rs",
"@@rules_rust+//crate_universe:src/test.rs",
"@@rules_rust+//crate_universe:src/utils.rs",
"@@rules_rust+//crate_universe:src/utils/starlark.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/glob.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/label.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/select.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/select_dict.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/select_list.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/select_scalar.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/select_set.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/serialize.rs",
"@@rules_rust+//crate_universe:src/utils/starlark/target_compatible_with.rs",
"@@rules_rust+//crate_universe:src/utils/symlink.rs",
"@@rules_rust+//crate_universe:src/utils/target_triple.rs"
],
"binary": "cargo-bazel",
"cargo_lockfile": "@@rules_rust+//crate_universe:Cargo.lock",
"cargo_toml": "@@rules_rust+//crate_universe:Cargo.toml",
"version": "1.94.1",
"timeout": 900,
"rust_toolchain_cargo_template": "@rust_host_tools//:bin/{tool}",
"rust_toolchain_rustc_template": "@rust_host_tools//:bin/{tool}",
"compressed_windows_toolchain_names": false
}
}
},
"moduleExtensionMetadata": {
"explicitRootModuleDirectDeps": [
"cargo_bazel_bootstrap"
],
"explicitRootModuleDirectDevDeps": [],
"useAllRepos": "NO",
"reproducible": false
}
}
}
},
"facts": {

View File

@@ -2891,6 +2891,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -13369,6 +13369,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -11213,6 +11213,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -787,6 +787,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -787,6 +787,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -930,6 +930,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1444,6 +1444,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1444,6 +1444,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1444,6 +1444,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -1206,6 +1206,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -930,6 +930,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -930,6 +930,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -930,6 +930,12 @@
"id": {
"type": "string"
},
"mcpAppResourceUri": {
"type": [
"string",
"null"
]
},
"result": {
"anyOf": [
{

View File

@@ -53,7 +53,7 @@ exitCode: number | null,
/**
* The duration of the command execution in milliseconds.
*/
durationMs: number | null, } | { "type": "fileChange", id: string, changes: Array<FileUpdateChange>, status: PatchApplyStatus, } | { "type": "mcpToolCall", id: string, server: string, tool: string, status: McpToolCallStatus, arguments: JsonValue, result: McpToolCallResult | null, error: McpToolCallError | null,
durationMs: number | null, } | { "type": "fileChange", id: string, changes: Array<FileUpdateChange>, status: PatchApplyStatus, } | { "type": "mcpToolCall", id: string, server: string, tool: string, status: McpToolCallStatus, arguments: JsonValue, mcpAppResourceUri?: string, result: McpToolCallResult | null, error: McpToolCallError | null,
/**
* The duration of the MCP tool call in milliseconds.
*/

View File

@@ -764,6 +764,7 @@ macro_rules! server_notification_definitions {
Display,
ExperimentalApi,
)]
#[allow(clippy::large_enum_variant)]
#[serde(tag = "method", content = "params", rename_all = "camelCase")]
#[strum(serialize_all = "camelCase")]
pub enum ServerNotification {

View File

@@ -502,6 +502,7 @@ impl ThreadHistoryBuilder {
.arguments
.clone()
.unwrap_or(serde_json::Value::Null),
mcp_app_resource_uri: payload.mcp_app_resource_uri.clone(),
result: None,
error: None,
duration_ms: None,
@@ -518,11 +519,11 @@ impl ThreadHistoryBuilder {
let duration_ms = i64::try_from(payload.duration.as_millis()).ok();
let (result, error) = match &payload.result {
Ok(value) => (
Some(McpToolCallResult {
Some(Box::new(McpToolCallResult {
content: value.content.clone(),
structured_content: value.structured_content.clone(),
meta: value.meta.clone(),
}),
})),
None,
),
Err(message) => (
@@ -542,6 +543,7 @@ impl ThreadHistoryBuilder {
.arguments
.clone()
.unwrap_or(serde_json::Value::Null),
mcp_app_resource_uri: payload.mcp_app_resource_uri.clone(),
result,
error,
duration_ms,
@@ -1809,6 +1811,7 @@ mod tests {
tool: "lookup".into(),
arguments: Some(serde_json::json!({"id":"123"})),
},
mcp_app_resource_uri: None,
duration: Duration::from_millis(8),
result: Err("boom".into()),
}),
@@ -1857,6 +1860,7 @@ mod tests {
tool: "lookup".into(),
status: McpToolCallStatus::Failed,
arguments: serde_json::json!({"id":"123"}),
mcp_app_resource_uri: None,
result: None,
error: Some(McpToolCallError {
message: "boom".into(),
@@ -1882,6 +1886,7 @@ mod tests {
tool: "lookup".into(),
arguments: Some(serde_json::json!({"id":"123"})),
},
mcp_app_resource_uri: Some("ui://widget/lookup.html".into()),
duration: Duration::from_millis(8),
result: Ok(CallToolResult {
content: vec![serde_json::json!({
@@ -1911,7 +1916,8 @@ mod tests {
tool: "lookup".into(),
status: McpToolCallStatus::Completed,
arguments: serde_json::json!({"id":"123"}),
result: Some(McpToolCallResult {
mcp_app_resource_uri: Some("ui://widget/lookup.html".into()),
result: Some(Box::new(McpToolCallResult {
content: vec![serde_json::json!({
"type": "text",
"text": "result"
@@ -1920,7 +1926,7 @@ mod tests {
meta: Some(serde_json::json!({
"ui/resourceUri": "ui://widget/lookup.html"
})),
}),
})),
error: None,
duration_ms: Some(8),
}

View File

@@ -4585,7 +4585,10 @@ pub enum ThreadItem {
tool: String,
status: McpToolCallStatus,
arguments: JsonValue,
result: Option<McpToolCallResult>,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
mcp_app_resource_uri: Option<String>,
result: Option<Box<McpToolCallResult>>,
error: Option<McpToolCallError>,
/// The duration of the MCP tool call in milliseconds.
#[ts(type = "number | null")]

View File

@@ -4,5 +4,9 @@ codex_rust_crate(
name = "app-server",
crate_name = "codex_app_server",
integration_test_timeout = "long",
test_shard_counts = {
"app-server-all-test": 8,
"app-server-unit-tests": 8,
},
test_tags = ["no-sandbox"],
)

View File

@@ -2910,6 +2910,7 @@ async fn construct_mcp_tool_call_notification(
tool: begin_event.invocation.tool,
status: McpToolCallStatus::InProgress,
arguments: begin_event.invocation.arguments.unwrap_or(JsonValue::Null),
mcp_app_resource_uri: begin_event.mcp_app_resource_uri,
result: None,
error: None,
duration_ms: None,
@@ -2936,11 +2937,11 @@ async fn construct_mcp_tool_call_end_notification(
let (result, error) = match &end_event.result {
Ok(value) => (
Some(McpToolCallResult {
Some(Box::new(McpToolCallResult {
content: value.content.clone(),
structured_content: value.structured_content.clone(),
meta: value.meta.clone(),
}),
})),
None,
),
Err(message) => (
@@ -2957,6 +2958,7 @@ async fn construct_mcp_tool_call_end_notification(
tool: end_event.invocation.tool,
status,
arguments: end_event.invocation.arguments.unwrap_or(JsonValue::Null),
mcp_app_resource_uri: end_event.mcp_app_resource_uri,
result,
error,
duration_ms,
@@ -4219,6 +4221,7 @@ mod tests {
tool: "list_mcp_resources".to_string(),
arguments: Some(serde_json::json!({"server": ""})),
},
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
};
let thread_id = ThreadId::new().to_string();
@@ -4239,6 +4242,7 @@ mod tests {
tool: begin_event.invocation.tool,
status: McpToolCallStatus::InProgress,
arguments: serde_json::json!({"server": ""}),
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
result: None,
error: None,
duration_ms: None,
@@ -4379,6 +4383,7 @@ mod tests {
tool: "list_mcp_resources".to_string(),
arguments: None,
},
mcp_app_resource_uri: None,
};
let thread_id = ThreadId::new().to_string();
@@ -4399,6 +4404,7 @@ mod tests {
tool: begin_event.invocation.tool,
status: McpToolCallStatus::InProgress,
arguments: JsonValue::Null,
mcp_app_resource_uri: None,
result: None,
error: None,
duration_ms: None,
@@ -4430,6 +4436,7 @@ mod tests {
tool: "list_mcp_resources".to_string(),
arguments: Some(serde_json::json!({"server": ""})),
},
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
duration: Duration::from_nanos(92708),
result: Ok(result),
};
@@ -4452,13 +4459,14 @@ mod tests {
tool: end_event.invocation.tool,
status: McpToolCallStatus::Completed,
arguments: serde_json::json!({"server": ""}),
result: Some(McpToolCallResult {
mcp_app_resource_uri: Some("ui://widget/list-resources.html".to_string()),
result: Some(Box::new(McpToolCallResult {
content,
structured_content: None,
meta: Some(serde_json::json!({
"ui/resourceUri": "ui://widget/list-resources.html"
})),
}),
})),
error: None,
duration_ms: Some(0),
},
@@ -4476,6 +4484,7 @@ mod tests {
tool: "list_mcp_resources".to_string(),
arguments: None,
},
mcp_app_resource_uri: None,
duration: Duration::from_millis(1),
result: Err("boom".to_string()),
};
@@ -4498,6 +4507,7 @@ mod tests {
tool: end_event.invocation.tool,
status: McpToolCallStatus::Failed,
arguments: JsonValue::Null,
mcp_app_resource_uri: None,
result: None,
error: Some(McpToolCallError {
message: "boom".to_string(),

View File

@@ -47,6 +47,10 @@ codex_rust_crate(
# succeeds without this workaround.
"//:AGENTS.md",
],
test_shard_counts = {
"core-all-test": 8,
"core-unit-tests": 8,
},
test_tags = ["no-sandbox"],
unit_test_timeout = "long",
extra_binaries = [

View File

@@ -101,6 +101,9 @@ pub(crate) async fn handle_mcp_tool_call(
let metadata =
lookup_mcp_tool_metadata(sess.as_ref(), turn_context.as_ref(), &server, &tool_name).await;
let mcp_app_resource_uri = metadata
.as_ref()
.and_then(|metadata| metadata.mcp_app_resource_uri.clone());
let app_tool_policy = if server == CODEX_APPS_MCP_SERVER_NAME {
connectors::app_tool_policy(
&turn_context.config,
@@ -130,6 +133,7 @@ pub(crate) async fn handle_mcp_tool_call(
turn_context.as_ref(),
&call_id,
invocation,
mcp_app_resource_uri.clone(),
"MCP tool call blocked by app configuration".to_string(),
/*already_started*/ false,
)
@@ -161,6 +165,7 @@ pub(crate) async fn handle_mcp_tool_call(
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.clone(),
invocation: invocation.clone(),
mcp_app_resource_uri: mcp_app_resource_uri.clone(),
});
notify_mcp_tool_call_event(sess.as_ref(), turn_context.as_ref(), tool_call_begin_event).await;
@@ -213,6 +218,7 @@ pub(crate) async fn handle_mcp_tool_call(
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.clone(),
invocation,
mcp_app_resource_uri: mcp_app_resource_uri.clone(),
duration,
result: result.clone(),
});
@@ -239,6 +245,7 @@ pub(crate) async fn handle_mcp_tool_call(
turn_context.as_ref(),
&call_id,
invocation,
mcp_app_resource_uri.clone(),
message,
/*already_started*/ true,
)
@@ -254,6 +261,7 @@ pub(crate) async fn handle_mcp_tool_call(
turn_context.as_ref(),
&call_id,
invocation,
mcp_app_resource_uri.clone(),
message,
/*already_started*/ true,
)
@@ -268,6 +276,7 @@ pub(crate) async fn handle_mcp_tool_call(
turn_context.as_ref(),
&call_id,
invocation,
mcp_app_resource_uri.clone(),
message,
/*already_started*/ true,
)
@@ -325,6 +334,7 @@ pub(crate) async fn handle_mcp_tool_call(
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.clone(),
invocation,
mcp_app_resource_uri,
duration,
result: result.clone(),
});
@@ -642,11 +652,14 @@ pub(crate) struct McpToolApprovalMetadata {
connector_description: Option<String>,
tool_title: Option<String>,
tool_description: Option<String>,
mcp_app_resource_uri: Option<String>,
codex_apps_meta: Option<serde_json::Map<String, serde_json::Value>>,
openai_file_input_params: Option<Vec<String>>,
}
const MCP_TOOL_CODEX_APPS_META_KEY: &str = "_codex_apps";
const MCP_TOOL_OPENAI_OUTPUT_TEMPLATE_META_KEY: &str = "openai/outputTemplate";
const MCP_TOOL_UI_RESOURCE_URI_META_KEY: &str = "ui/resourceUri";
fn custom_mcp_tool_approval_mode(
turn_context: &TurnContext,
@@ -1100,6 +1113,7 @@ pub(crate) async fn lookup_mcp_tool_metadata(
connector_description,
tool_title: tool_info.tool.title,
tool_description: tool_info.tool.description.map(std::borrow::Cow::into_owned),
mcp_app_resource_uri: get_mcp_app_resource_uri(tool_info.tool.meta.as_deref()),
codex_apps_meta: tool_info
.tool
.meta
@@ -1114,6 +1128,26 @@ pub(crate) async fn lookup_mcp_tool_metadata(
})
}
fn get_mcp_app_resource_uri(
meta: Option<&serde_json::Map<String, serde_json::Value>>,
) -> Option<String> {
meta.and_then(|meta| {
meta.get("ui")
.and_then(serde_json::Value::as_object)
.and_then(|ui| ui.get("resourceUri"))
.and_then(serde_json::Value::as_str)
.or_else(|| {
meta.get(MCP_TOOL_UI_RESOURCE_URI_META_KEY)
.and_then(serde_json::Value::as_str)
})
.or_else(|| {
meta.get(MCP_TOOL_OPENAI_OUTPUT_TEMPLATE_META_KEY)
.and_then(serde_json::Value::as_str)
})
.map(str::to_string)
})
}
async fn lookup_mcp_app_usage_metadata(
sess: &Session,
server: &str,
@@ -1666,6 +1700,7 @@ async fn notify_mcp_tool_call_skip(
turn_context: &TurnContext,
call_id: &str,
invocation: McpInvocation,
mcp_app_resource_uri: Option<String>,
message: String,
already_started: bool,
) -> Result<CallToolResult, String> {
@@ -1673,6 +1708,7 @@ async fn notify_mcp_tool_call_skip(
let tool_call_begin_event = EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.to_string(),
invocation: invocation.clone(),
mcp_app_resource_uri: mcp_app_resource_uri.clone(),
});
notify_mcp_tool_call_event(sess, turn_context, tool_call_begin_event).await;
}
@@ -1680,6 +1716,7 @@ async fn notify_mcp_tool_call_skip(
let tool_call_end_event = EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.to_string(),
invocation,
mcp_app_resource_uri,
duration: Duration::ZERO,
result: Err(message.clone()),
});

View File

@@ -59,6 +59,7 @@ fn approval_metadata(
connector_description: connector_description.map(str::to_string),
tool_title: tool_title.map(str::to_string),
tool_description: tool_description.map(str::to_string),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
}
@@ -74,6 +75,35 @@ fn prompt_options(
}
}
#[test]
fn mcp_app_resource_uri_reads_known_tool_meta_keys() {
let nested = serde_json::json!({
"ui": {
"resourceUri": "ui://widget/nested.html",
},
});
assert_eq!(
get_mcp_app_resource_uri(nested.as_object()),
Some("ui://widget/nested.html".to_string())
);
let flat = serde_json::json!({
"ui/resourceUri": "ui://widget/flat.html",
});
assert_eq!(
get_mcp_app_resource_uri(flat.as_object()),
Some("ui://widget/flat.html".to_string())
);
let output_template = serde_json::json!({
"openai/outputTemplate": "ui://widget/output-template.html",
});
assert_eq!(
get_mcp_app_resource_uri(output_template.as_object()),
Some("ui://widget/output-template.html".to_string())
);
}
#[test]
fn approval_required_when_read_only_false_and_destructive() {
let annotations = annotations(Some(false), Some(true), /*open_world*/ None);
@@ -589,6 +619,7 @@ async fn codex_apps_tool_call_request_meta_includes_turn_metadata_and_codex_apps
connector_description: Some("Manage events".to_string()),
tool_title: Some("Create Event".to_string()),
tool_description: Some("Create a calendar event.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: Some(
serde_json::json!({
"resource_uri": "connector://calendar/tools/calendar_create_event",
@@ -746,6 +777,7 @@ fn guardian_mcp_review_request_includes_annotations_when_present() {
connector_description: None,
tool_title: None,
tool_description: None,
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1272,6 +1304,7 @@ async fn approve_mode_skips_when_annotations_do_not_require_approval() {
connector_description: None,
tool_title: Some("Read Only Tool".to_string()),
tool_description: None,
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1340,6 +1373,7 @@ async fn guardian_mode_skips_auto_when_annotations_do_not_require_approval() {
connector_description: None,
tool_title: Some("Read Only Tool".to_string()),
tool_description: None,
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1411,6 +1445,7 @@ async fn guardian_mode_mcp_denial_returns_rationale_message() {
connector_description: None,
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Reads calendar data.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1462,6 +1497,7 @@ async fn prompt_mode_waits_for_approval_when_annotations_do_not_require_approval
connector_description: None,
tool_title: Some("Read Only Tool".to_string()),
tool_description: None,
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1539,6 +1575,7 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_for_model() {
connector_description: Some("Manage events".to_string()),
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1609,6 +1646,7 @@ async fn custom_approve_mode_blocks_when_arc_returns_interrupt_for_model() {
connector_description: None,
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1679,6 +1717,7 @@ async fn approve_mode_blocks_when_arc_returns_interrupt_without_annotations() {
connector_description: Some("Manage events".to_string()),
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1757,6 +1796,7 @@ async fn full_access_mode_skips_arc_monitor_for_all_approval_modes() {
connector_description: Some("Manage events".to_string()),
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};
@@ -1859,6 +1899,7 @@ async fn approve_mode_routes_arc_ask_user_to_guardian_when_guardian_reviewer_is_
connector_description: Some("Manage events".to_string()),
tool_title: Some("Dangerous Tool".to_string()),
tool_description: Some("Performs a risky action.".to_string()),
mcp_app_resource_uri: None,
codex_apps_meta: None,
openai_file_input_params: None,
};

View File

@@ -562,6 +562,7 @@ async fn emit_tool_call_begin(
EventMsg::McpToolCallBegin(McpToolCallBeginEvent {
call_id: call_id.to_string(),
invocation,
mcp_app_resource_uri: None,
}),
)
.await;
@@ -581,6 +582,7 @@ async fn emit_tool_call_end(
EventMsg::McpToolCallEnd(McpToolCallEndEvent {
call_id: call_id.to_string(),
invocation,
mcp_app_resource_uri: None,
duration,
result,
}),

View File

@@ -21,6 +21,8 @@ const SERVER_VERSION: &str = "1.0.0";
const SEARCHABLE_TOOL_COUNT: usize = 100;
pub const CALENDAR_CREATE_EVENT_RESOURCE_URI: &str =
"connector://calendar/tools/calendar_create_event";
pub const CALENDAR_CREATE_EVENT_MCP_APP_RESOURCE_URI: &str =
"ui://widget/calendar-create-event.html";
const CALENDAR_LIST_EVENTS_RESOURCE_URI: &str = "connector://calendar/tools/calendar_list_events";
pub const DOCUMENT_EXTRACT_TEXT_RESOURCE_URI: &str =
"connector://calendar/tools/calendar_extract_text";
@@ -206,6 +208,7 @@ impl Respond for CodexAppsJsonRpcResponder {
"connector_id": CONNECTOR_ID,
"connector_name": self.connector_name.clone(),
"connector_description": self.connector_description.clone(),
"openai/outputTemplate": CALENDAR_CREATE_EVENT_MCP_APP_RESOURCE_URI,
"_codex_apps": {
"resource_uri": CALENDAR_CREATE_EVENT_RESOURCE_URI,
"contains_mcp_source": true,

View File

@@ -736,6 +736,7 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> {
tool: "image".to_string(),
arguments: Some(json!({})),
},
mcp_app_resource_uri: None,
},
);

View File

@@ -15,6 +15,7 @@ use codex_protocol::protocol::Op;
use codex_protocol::protocol::SandboxPolicy;
use codex_protocol::user_input::UserInput;
use core_test_support::apps_test_server::AppsTestServer;
use core_test_support::apps_test_server::CALENDAR_CREATE_EVENT_MCP_APP_RESOURCE_URI;
use core_test_support::apps_test_server::CALENDAR_CREATE_EVENT_RESOURCE_URI;
use core_test_support::responses::ResponsesRequest;
use core_test_support::responses::ev_assistant_message;
@@ -93,7 +94,7 @@ fn tool_search_output_tools(request: &ResponsesRequest, call_id: &str) -> Vec<Va
.unwrap_or_default()
}
fn configure_apps_without_tool_search(config: &mut Config, apps_base_url: &str) {
fn configure_search_capable_apps(config: &mut Config, apps_base_url: &str) {
config
.features
.enable(Feature::Apps)
@@ -112,14 +113,18 @@ fn configure_apps_without_tool_search(config: &mut Config, apps_base_url: &str)
config.model_catalog = Some(model_catalog);
}
fn configure_apps(config: &mut Config, apps_base_url: &str) {
configure_apps_without_tool_search(config, apps_base_url);
fn configure_apps_without_tool_search(config: &mut Config, apps_base_url: &str) {
configure_search_capable_apps(config, apps_base_url);
config
.features
.enable(Feature::ToolSearch)
.disable(Feature::ToolSearch)
.expect("test config should allow feature update");
}
fn configure_apps(config: &mut Config, apps_base_url: &str) {
configure_search_capable_apps(config, apps_base_url);
}
fn configured_builder(apps_base_url: String) -> TestCodexBuilder {
test_codex()
.with_auth(CodexAuth::create_dummy_chatgpt_auth_for_testing())
@@ -127,7 +132,7 @@ fn configured_builder(apps_base_url: String) -> TestCodexBuilder {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn search_tool_flag_adds_tool_search() -> Result<()> {
async fn search_tool_enabled_by_default_adds_tool_search() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
@@ -185,7 +190,7 @@ async fn search_tool_flag_adds_tool_search() -> Result<()> {
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn tool_search_disabled_by_default_exposes_apps_tools_directly() -> Result<()> {
async fn tool_search_disabled_exposes_apps_tools_directly() -> Result<()> {
skip_if_no_network!(Ok(()));
let server = start_mock_server().await;
@@ -453,6 +458,19 @@ async fn tool_search_returns_deferred_tools_without_follow_up_tool_injection() -
})
.await?;
let EventMsg::McpToolCallBegin(begin) = wait_for_event(&test.codex, |event| {
matches!(event, EventMsg::McpToolCallBegin(_))
})
.await
else {
unreachable!("event guard guarantees McpToolCallBegin");
};
assert_eq!(begin.call_id, "calendar-call-1");
assert_eq!(
begin.mcp_app_resource_uri.as_deref(),
Some(CALENDAR_CREATE_EVENT_MCP_APP_RESOURCE_URI)
);
let EventMsg::McpToolCallEnd(end) = wait_for_event(&test.codex, |event| {
matches!(event, EventMsg::McpToolCallEnd(_))
})
@@ -461,6 +479,10 @@ async fn tool_search_returns_deferred_tools_without_follow_up_tool_injection() -
unreachable!("event guard guarantees McpToolCallEnd");
};
assert_eq!(end.call_id, "calendar-call-1");
assert_eq!(
end.mcp_app_resource_uri.as_deref(),
Some(CALENDAR_CREATE_EVENT_MCP_APP_RESOURCE_URI)
);
assert_eq!(
end.invocation,
McpInvocation {

View File

@@ -464,6 +464,7 @@ fn mcp_tool_call_begin_and_end_emit_item_events() {
tool: "tool_x".to_string(),
status: ApiMcpToolCallStatus::InProgress,
arguments: json!({ "key": "value" }),
mcp_app_resource_uri: None,
result: None,
error: None,
duration_ms: None,
@@ -479,11 +480,12 @@ fn mcp_tool_call_begin_and_end_emit_item_events() {
tool: "tool_x".to_string(),
status: ApiMcpToolCallStatus::Completed,
arguments: json!({ "key": "value" }),
result: Some(McpToolCallResult {
mcp_app_resource_uri: None,
result: Some(Box::new(McpToolCallResult {
content: Vec::new(),
structured_content: None,
meta: None,
}),
})),
error: None,
duration_ms: Some(1_000),
},
@@ -547,6 +549,7 @@ fn mcp_tool_call_failure_sets_failed_status() {
tool: "tool_y".to_string(),
status: ApiMcpToolCallStatus::Failed,
arguments: json!({ "param": 42 }),
mcp_app_resource_uri: None,
result: None,
error: Some(McpToolCallError {
message: "tool exploded".to_string(),
@@ -593,6 +596,7 @@ fn mcp_tool_call_defaults_arguments_and_preserves_structured_content() {
tool: "tool_z".to_string(),
status: ApiMcpToolCallStatus::InProgress,
arguments: serde_json::Value::Null,
mcp_app_resource_uri: None,
result: None,
error: None,
duration_ms: None,
@@ -608,14 +612,15 @@ fn mcp_tool_call_defaults_arguments_and_preserves_structured_content() {
tool: "tool_z".to_string(),
status: ApiMcpToolCallStatus::Completed,
arguments: serde_json::Value::Null,
result: Some(McpToolCallResult {
mcp_app_resource_uri: None,
result: Some(Box::new(McpToolCallResult {
content: vec![json!({
"type": "text",
"text": "done",
})],
structured_content: Some(json!({ "status": "ok" })),
meta: None,
}),
})),
error: None,
duration_ms: Some(10),
},

View File

@@ -788,8 +788,8 @@ pub const FEATURES: &[FeatureSpec] = &[
FeatureSpec {
id: Feature::ToolSearch,
key: "tool_search",
stage: Stage::UnderDevelopment,
default_enabled: false,
stage: Stage::Stable,
default_enabled: true,
},
FeatureSpec {
id: Feature::UnavailableDummyTools,

View File

@@ -128,9 +128,9 @@ fn tool_suggest_is_stable_and_enabled_by_default() {
}
#[test]
fn tool_search_is_under_development_and_disabled_by_default() {
assert_eq!(Feature::ToolSearch.stage(), Stage::UnderDevelopment);
assert_eq!(Feature::ToolSearch.default_enabled(), false);
fn tool_search_is_stable_and_enabled_by_default() {
assert_eq!(Feature::ToolSearch.stage(), Stage::Stable);
assert_eq!(Feature::ToolSearch.default_enabled(), true);
}
#[test]

View File

@@ -2362,6 +2362,9 @@ pub struct McpToolCallBeginEvent {
/// Identifier so this can be paired with the McpToolCallEnd event.
pub call_id: String,
pub invocation: McpInvocation,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub mcp_app_resource_uri: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)]
@@ -2369,6 +2372,9 @@ pub struct McpToolCallEndEvent {
/// Identifier for the corresponding McpToolCallBegin that finished.
pub call_id: String,
pub invocation: McpInvocation,
#[serde(default, skip_serializing_if = "Option::is_none")]
#[ts(optional)]
pub mcp_app_resource_uri: Option<String>,
#[ts(type = "string")]
pub duration: Duration,
/// Result of the tool call. Note this could be an error.

View File

@@ -1334,7 +1334,7 @@ fn search_tool_description_lists_each_mcp_source_once() {
}
#[test]
fn search_tool_requires_model_capability_and_feature_flag() {
fn search_tool_requires_model_capability_and_enabled_feature() {
let model_info = search_capable_model_info();
let deferred_mcp_tools = Some(vec![deferred_mcp_tool(
"_create_event",
@@ -1367,10 +1367,12 @@ fn search_tool_requires_model_capability_and_feature_flag() {
);
assert_lacks_tool_name(&tools, TOOL_SEARCH_TOOL_NAME);
let mut features_without_tool_search = Features::with_defaults();
features_without_tool_search.disable(Feature::ToolSearch);
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_info: &model_info,
available_models: &available_models,
features: &features,
features: &features_without_tool_search,
image_generation_tool_auth_allowed: true,
web_search_mode: Some(WebSearchMode::Cached),
session_source: SessionSource::Cli,
@@ -1385,8 +1387,6 @@ fn search_tool_requires_model_capability_and_feature_flag() {
);
assert_lacks_tool_name(&tools, TOOL_SEARCH_TOOL_NAME);
let mut features = Features::with_defaults();
features.enable(Feature::ToolSearch);
let tools_config = ToolsConfig::new(&ToolsConfigParams {
model_info: &model_info,
available_models: &available_models,

View File

@@ -19,6 +19,9 @@ codex_rust_crate(
"src/**/*.rs",
"src/**/snapshots/**",
]) + ["//codex-rs/core:model_availability_nux_fixtures"],
test_shard_counts = {
"tui-unit-tests": 8,
},
integration_compile_data_extra = ["src/test_backend.rs"],
extra_binaries = [
"//codex-rs/cli:codex",

View File

@@ -4759,6 +4759,7 @@ impl ChatWidget {
invocation,
duration,
result,
..
} = ev;
let extra_cell = match self
@@ -5943,6 +5944,7 @@ impl ChatWidget {
server,
tool,
arguments,
mcp_app_resource_uri,
result,
error,
duration_ms,
@@ -5955,15 +5957,19 @@ impl ChatWidget {
tool,
arguments: Some(arguments),
},
mcp_app_resource_uri,
duration: Duration::from_millis(duration_ms.unwrap_or_default().max(0) as u64),
result: match (result, error) {
(_, Some(error)) => Err(error.message),
(Some(result), None) => Ok(codex_protocol::mcp::CallToolResult {
content: result.content,
structured_content: result.structured_content,
is_error: Some(false),
meta: None,
}),
(Some(result), None) => {
let result = *result;
Ok(codex_protocol::mcp::CallToolResult {
content: result.content,
structured_content: result.structured_content,
is_error: Some(false),
meta: None,
})
}
(None, None) => Err("MCP tool call completed without a result".to_string()),
},
});
@@ -6453,6 +6459,7 @@ impl ChatWidget {
server,
tool,
arguments,
mcp_app_resource_uri,
..
} => {
self.on_mcp_tool_call_begin(McpToolCallBeginEvent {
@@ -6462,6 +6469,7 @@ impl ChatWidget {
tool,
arguments: Some(arguments),
},
mcp_app_resource_uri,
});
}
ThreadItem::WebSearch { id, .. } => {

243
defs.bzl
View File

@@ -79,6 +79,14 @@ def _workspace_root_test_impl(ctx):
runfiles = runfiles.merge(ctx.runfiles(files = data_dep[DefaultInfo].files.to_list()))
runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
env = {}
inherited_environment = []
if RunEnvironmentInfo in ctx.attr.test_bin:
test_bin_env = ctx.attr.test_bin[RunEnvironmentInfo]
env.update(test_bin_env.environment)
inherited_environment = test_bin_env.inherited_environment
env.update(ctx.attr.env)
return [
DefaultInfo(
executable = launcher,
@@ -86,7 +94,8 @@ def _workspace_root_test_impl(ctx):
runfiles = runfiles,
),
RunEnvironmentInfo(
environment = ctx.attr.env,
environment = env,
inherited_environment = inherited_environment,
),
]
@@ -122,6 +131,73 @@ workspace_root_test = rule(
},
)
def _test_binary_test_impl(ctx):
is_windows = ctx.target_platform_has_constraint(ctx.attr._windows_constraint[platform_common.ConstraintValueInfo])
launcher = ctx.actions.declare_file(ctx.label.name + ".bat" if is_windows else ctx.label.name)
test_bin = ctx.executable.test_bin
launcher_template = ctx.file._windows_launcher_template if is_windows else ctx.file._bash_launcher_template
ctx.actions.expand_template(
template = launcher_template,
output = launcher,
is_executable = True,
substitutions = {
"__TEST_BIN__": test_bin.short_path,
},
)
runfiles = ctx.runfiles(files = [test_bin]).merge(ctx.attr.test_bin[DefaultInfo].default_runfiles)
for data_dep in ctx.attr.data:
runfiles = runfiles.merge(ctx.runfiles(files = data_dep[DefaultInfo].files.to_list()))
runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
env = {}
inherited_environment = []
if RunEnvironmentInfo in ctx.attr.test_bin:
test_bin_env = ctx.attr.test_bin[RunEnvironmentInfo]
env.update(test_bin_env.environment)
inherited_environment = test_bin_env.inherited_environment
env.update(ctx.attr.env)
return [
DefaultInfo(
executable = launcher,
files = depset([launcher]),
runfiles = runfiles,
),
RunEnvironmentInfo(
environment = env,
inherited_environment = inherited_environment,
),
]
test_binary_test = rule(
implementation = _test_binary_test_impl,
test = True,
attrs = {
"data": attr.label_list(
allow_files = True,
),
"env": attr.string_dict(),
"test_bin": attr.label(
cfg = "target",
executable = True,
mandatory = True,
),
"_windows_constraint": attr.label(
default = "@platforms//os:windows",
providers = [platform_common.ConstraintValueInfo],
),
"_bash_launcher_template": attr.label(
allow_single_file = True,
default = "//:test_binary_test_launcher.sh.tpl",
),
"_windows_launcher_template": attr.label(
allow_single_file = True,
default = "//:test_binary_test_launcher.bat.tpl",
),
},
)
def codex_rust_crate(
name,
crate_name,
@@ -140,6 +216,7 @@ def codex_rust_crate(
integration_test_args = [],
integration_test_timeout = None,
test_data_extra = [],
test_shard_counts = {},
test_tags = [],
unit_test_timeout = None,
extra_binaries = []):
@@ -174,6 +251,13 @@ def codex_rust_crate(
integration_test_timeout: Optional Bazel timeout for integration test
targets generated from `tests/*.rs`.
test_data_extra: Extra runtime data for tests.
test_shard_counts: Mapping from generated test target name to explicit
shard target count. Matching tests are exposed as `test_suite`
aggregate targets over generated `*-shard-N-of-M` test targets.
The shard targets opt into the rules_rust libtest sharding wrapper
and set RULES_RUST_TEST_TOTAL_SHARDS/RULES_RUST_TEST_SHARD_INDEX
themselves. For unit tests, use the outer target name, such as
`core-unit-tests`.
test_tags: Tags applied to unit + integration test targets.
Typically used to disable the sandbox, but see https://bazel.build/reference/be/common-definitions#common.tags
unit_test_timeout: Optional Bazel timeout for the unit-test target
@@ -246,7 +330,13 @@ def codex_rust_crate(
visibility = ["//visibility:public"],
)
unit_test_name = name + "-unit-tests"
unit_test_binary = name + "-unit-tests-bin"
unit_test_shard_count = _test_shard_count(test_shard_counts, unit_test_name)
unit_test_binary_kwargs = {}
if unit_test_shard_count:
unit_test_binary_kwargs["experimental_enable_sharding"] = True
rust_test(
name = unit_test_binary,
crate = name,
@@ -265,20 +355,41 @@ def codex_rust_crate(
rustc_env = rustc_env,
data = test_data_extra,
tags = test_tags + ["manual"],
**unit_test_binary_kwargs
)
unit_test_kwargs = {}
if unit_test_timeout:
unit_test_kwargs["timeout"] = unit_test_timeout
workspace_root_test(
name = name + "-unit-tests",
env = test_env,
test_bin = ":" + unit_test_binary,
workspace_root_marker = "//codex-rs/utils/cargo-bin:repo_root.marker",
tags = test_tags,
**unit_test_kwargs
)
if unit_test_shard_count:
unit_shard_targets = []
for shard_index in range(unit_test_shard_count):
shard_name = _test_shard_name(unit_test_name, shard_index, unit_test_shard_count)
workspace_root_test(
name = shard_name,
env = test_env | _test_shard_env(shard_index, unit_test_shard_count),
test_bin = ":" + unit_test_binary,
workspace_root_marker = "//codex-rs/utils/cargo-bin:repo_root.marker",
tags = test_tags,
**unit_test_kwargs
)
unit_shard_targets.append(":" + shard_name)
native.test_suite(
name = unit_test_name,
tests = unit_shard_targets,
tags = test_tags,
)
else:
workspace_root_test(
name = unit_test_name,
env = test_env,
test_bin = ":" + unit_test_binary,
workspace_root_marker = "//codex-rs/utils/cargo-bin:repo_root.marker",
tags = test_tags,
**unit_test_kwargs
)
maybe_deps += [name]
@@ -318,26 +429,94 @@ def codex_rust_crate(
if not test_name.endswith("-test"):
test_name += "-test"
rust_test(
name = test_name,
crate_name = test_crate_name,
crate_root = test,
srcs = [test],
data = native.glob(["tests/**"], allow_empty = True) + sanitized_binaries + test_data_extra,
compile_data = native.glob(["tests/**"], allow_empty = True) + integration_compile_data_extra,
deps = all_crate_deps(normal = True, normal_dev = True) + maybe_deps + deps_extra,
# Bazel has emitted both `codex-rs/<crate>/...` and
# `../codex-rs/<crate>/...` paths for `file!()`. Strip either
# prefix so Insta records Cargo-like metadata such as `core/tests/...`.
rustc_flags = rustc_flags_extra + WINDOWS_RUSTC_LINK_FLAGS + [
"--remap-path-prefix=../codex-rs=",
"--remap-path-prefix=codex-rs=",
],
rustc_env = rustc_env,
# Important: do not merge `test_env` here. Its unit-test-only
# `INSTA_WORKSPACE_ROOT="codex-rs"` is tuned for unit tests that
# execute from the repo root and can misplace integration snapshots.
env = cargo_env,
tags = test_tags,
**integration_test_kwargs
)
test_kwargs = {}
test_kwargs.update(integration_test_kwargs)
test_shard_count = _test_shard_count(test_shard_counts, test_name)
if test_shard_count:
test_binary_name = test_name + "-bin"
rust_test(
name = test_binary_name,
crate_name = test_crate_name,
crate_root = test,
srcs = [test],
data = native.glob(["tests/**"], allow_empty = True) + sanitized_binaries + test_data_extra,
compile_data = native.glob(["tests/**"], allow_empty = True) + integration_compile_data_extra,
deps = all_crate_deps(normal = True, normal_dev = True) + maybe_deps + deps_extra,
# Bazel has emitted both `codex-rs/<crate>/...` and
# `../codex-rs/<crate>/...` paths for `file!()`. Strip either
# prefix so Insta records Cargo-like metadata such as `core/tests/...`.
rustc_flags = rustc_flags_extra + WINDOWS_RUSTC_LINK_FLAGS + [
"--remap-path-prefix=../codex-rs=",
"--remap-path-prefix=codex-rs=",
],
rustc_env = rustc_env,
# Important: do not merge `test_env` here. Its unit-test-only
# `INSTA_WORKSPACE_ROOT="codex-rs"` is tuned for unit tests that
# execute from the repo root and can misplace integration snapshots.
env = cargo_env,
tags = test_tags + ["manual"],
experimental_enable_sharding = True,
**test_kwargs
)
integration_shard_targets = []
for shard_index in range(test_shard_count):
shard_name = _test_shard_name(test_name, shard_index, test_shard_count)
test_binary_test(
name = shard_name,
env = _test_shard_env(shard_index, test_shard_count),
test_bin = ":" + test_binary_name,
tags = test_tags,
**test_kwargs
)
integration_shard_targets.append(":" + shard_name)
native.test_suite(
name = test_name,
tests = integration_shard_targets,
tags = test_tags,
)
else:
rust_test(
name = test_name,
crate_name = test_crate_name,
crate_root = test,
srcs = [test],
data = native.glob(["tests/**"], allow_empty = True) + sanitized_binaries + test_data_extra,
compile_data = native.glob(["tests/**"], allow_empty = True) + integration_compile_data_extra,
deps = all_crate_deps(normal = True, normal_dev = True) + maybe_deps + deps_extra,
# Bazel has emitted both `codex-rs/<crate>/...` and
# `../codex-rs/<crate>/...` paths for `file!()`. Strip either
# prefix so Insta records Cargo-like metadata such as `core/tests/...`.
rustc_flags = rustc_flags_extra + WINDOWS_RUSTC_LINK_FLAGS + [
"--remap-path-prefix=../codex-rs=",
"--remap-path-prefix=codex-rs=",
],
rustc_env = rustc_env,
# Important: do not merge `test_env` here. Its unit-test-only
# `INSTA_WORKSPACE_ROOT="codex-rs"` is tuned for unit tests that
# execute from the repo root and can misplace integration snapshots.
env = cargo_env,
tags = test_tags,
**test_kwargs
)
def _test_shard_count(test_shard_counts, test_name):
shard_count = test_shard_counts.get(test_name)
if shard_count == None:
return None
if shard_count < 1:
fail("test_shard_counts[{}] must be a positive integer".format(test_name))
return shard_count
def _test_shard_env(shard_index, shard_count):
return {
"RULES_RUST_TEST_SHARD_INDEX": str(shard_index),
"RULES_RUST_TEST_TOTAL_SHARDS": str(shard_count),
}
def _test_shard_name(test_name, shard_index, shard_count):
return "{}-shard-{}-of-{}".format(test_name, shard_index + 1, shard_count)

View File

@@ -10,7 +10,7 @@ exports_files([
"rules_rust_windows_exec_bin_target.patch",
"rules_rust_windows_exec_std.patch",
"rules_rust_windows_process_wrapper_skip_temp_outputs.patch",
"rules_rust_repository_set_exec_constraints.patch",
"rules_rust_stable_explicit_test_shards.patch",
"rules_rust_windows_msvc_direct_link_args.patch",
"rules_rust_windows_gnullvm_build_script.patch",
"rules_rs_windows_gnullvm_exec.patch",

View File

@@ -1,26 +0,0 @@
# What: let `rules_rust` repository_set entries specify an explicit exec-platform
# constraint set.
# Why: codex needs Windows nightly lint toolchains to run helper binaries on an
# MSVC exec platform while still targeting `windows-gnullvm` crates.
diff --git a/rust/extensions.bzl b/rust/extensions.bzl
--- a/rust/extensions.bzl
+++ b/rust/extensions.bzl
@@ -52,6 +52,7 @@ def _rust_impl(module_ctx):
"allocator_library": repository_set.allocator_library,
"dev_components": repository_set.dev_components,
"edition": repository_set.edition,
+ "exec_compatible_with": [str(v) for v in repository_set.exec_compatible_with] if repository_set.exec_compatible_with else None,
"exec_triple": repository_set.exec_triple,
"extra_target_triples": {repository_set.target_triple: [str(v) for v in repository_set.target_compatible_with]},
"name": repository_set.name,
@@ -166,6 +167,9 @@ _COMMON_TAG_KWARGS = {
_RUST_REPOSITORY_SET_TAG_ATTRS = {
+ "exec_compatible_with": attr.label_list(
+ doc = "Execution platform constraints for this repository_set.",
+ ),
"exec_triple": attr.string(
doc = "Exec triple for this repository_set.",
),
"name": attr.string(

View File

@@ -0,0 +1,290 @@
# What: make rust_test sharding assign tests by stable name hash and accept
# explicit per-target shard env vars.
# Why: Codex generates separate Bazel test labels for each shard so BuildBuddy
# can report flakiness and timing per shard label, but Bazel reserves TEST_*
# vars in normal test env. This mirrors hermeticbuild/rules_rust#14.
diff --git a/rust/private/rust.bzl b/rust/private/rust.bzl
index 57b2794f7..ffa8ece7d 100644
--- a/rust/private/rust.bzl
+++ b/rust/private/rust.bzl
@@ -928,15 +928,6 @@ _RUST_TEST_ATTRS = {
"env_inherit": attr.string_list(
doc = "Specifies additional environment variables to inherit from the external environment when the test is executed by bazel test.",
),
- "use_libtest_harness": attr.bool(
- mandatory = False,
- default = True,
- doc = dedent("""\
- Whether to use `libtest`. For targets using this flag, individual tests can be run by using the
- [--test_arg](https://docs.bazel.build/versions/4.0.0/command-line-reference.html#flag--test_arg) flag.
- E.g. `bazel test //src:rust_test --test_arg=foo::test::test_fn`.
- """),
- ),
"experimental_enable_sharding": attr.bool(
mandatory = False,
default = False,
@@ -945,14 +936,25 @@ _RUST_TEST_ATTRS = {
When enabled, tests are executed via a wrapper script that:
1. Enumerates tests using libtest's --list flag
- 2. Partitions tests across shards based on TEST_SHARD_INDEX/TEST_TOTAL_SHARDS
- 3. Runs only the tests assigned to the current shard
+ 2. Sorts tests by name and partitions them across shards by stable name hash
+ 3. Uses either Bazel's native TEST_TOTAL_SHARDS/TEST_SHARD_INDEX env
+ or explicit RULES_RUST_TEST_TOTAL_SHARDS/RULES_RUST_TEST_SHARD_INDEX env
+ 4. Runs only the tests assigned to the current shard
This attribute only has an effect when use_libtest_harness is True.
This is experimental and may change in future releases.
"""),
),
+ "use_libtest_harness": attr.bool(
+ mandatory = False,
+ default = True,
+ doc = dedent("""\
+ Whether to use `libtest`. For targets using this flag, individual tests can be run by using the
+ [--test_arg](https://docs.bazel.build/versions/4.0.0/command-line-reference.html#flag--test_arg) flag.
+ E.g. `bazel test //src:rust_test --test_arg=foo::test::test_fn`.
+ """),
+ ),
"_test_sharding_wrapper_unix": attr.label(
default = Label("//rust/private:test_sharding_wrapper.sh"),
allow_single_file = True,
diff --git a/rust/private/test_sharding_wrapper.bat b/rust/private/test_sharding_wrapper.bat
index 5c90681c8..3e0acbb54 100644
--- a/rust/private/test_sharding_wrapper.bat
+++ b/rust/private/test_sharding_wrapper.bat
@@ -14,7 +14,8 @@
@REM Wrapper script for rust_test that enables Bazel test sharding support.
@REM This script intercepts test execution, enumerates tests using libtest's
-@REM --list flag, partitions them by shard index, and runs only the relevant subset.
+@REM --list flag, partitions them by stable test-name hash, and runs only the
+@REM relevant subset.
@ECHO OFF
SETLOCAL EnableDelayedExpansion
@@ -65,6 +66,35 @@ IF !FOUND_BINARY! EQU 0 IF DEFINED RUNFILES_DIR (
)
)
+@REM Try 4: manifest-based runfile lookup. This covers nested launchers that
+@REM execute the sharding wrapper from another test's runfiles tree.
+IF !FOUND_BINARY! EQU 0 (
+ SET "MANIFEST=!RUNFILES_MANIFEST_FILE!"
+ IF NOT DEFINED MANIFEST IF EXIST "%~f0.runfiles_manifest" SET "MANIFEST=%~f0.runfiles_manifest"
+ IF NOT DEFINED MANIFEST IF EXIST "%~dpn0.runfiles_manifest" SET "MANIFEST=%~dpn0.runfiles_manifest"
+ IF NOT DEFINED MANIFEST IF EXIST "%~f0.exe.runfiles_manifest" SET "MANIFEST=%~f0.exe.runfiles_manifest"
+
+ IF DEFINED MANIFEST IF EXIST "!MANIFEST!" (
+ SET "TEST_BINARY_MANIFEST_PATH=!TEST_BINARY_RAW!"
+ SET "TEST_BINARY_MANIFEST_PATH=!TEST_BINARY_MANIFEST_PATH:\=/!"
+ IF DEFINED TEST_WORKSPACE SET "TEST_BINARY_MANIFEST_WORKSPACE_PATH=!TEST_WORKSPACE!/!TEST_BINARY_MANIFEST_PATH!"
+ FOR /F "usebackq tokens=1,* delims= " %%A IN ("!MANIFEST!") DO (
+ IF "%%A"=="!TEST_BINARY_MANIFEST_PATH!" (
+ SET "TEST_BINARY_PATH=%%B"
+ SET FOUND_BINARY=1
+ GOTO :FOUND_TEST_BINARY
+ )
+ IF DEFINED TEST_BINARY_MANIFEST_WORKSPACE_PATH IF "%%A"=="!TEST_BINARY_MANIFEST_WORKSPACE_PATH!" (
+ SET "TEST_BINARY_PATH=%%B"
+ SET FOUND_BINARY=1
+ GOTO :FOUND_TEST_BINARY
+ )
+ )
+ )
+)
+
+:FOUND_TEST_BINARY
+
IF !FOUND_BINARY! EQU 0 (
ECHO ERROR: Could not find test binary at any expected location
EXIT /B 1
@@ -73,40 +74,84 @@ IF !FOUND_BINARY! EQU 0 (
EXIT /B 1
)
+@REM Native Bazel test sharding sets TEST_TOTAL_SHARDS/TEST_SHARD_INDEX.
+@REM Explicit shard test targets can set RULES_RUST_TEST_TOTAL_SHARDS/
+@REM RULES_RUST_TEST_SHARD_INDEX instead because Bazel may reserve TEST_*
+@REM variables for its own test runner env.
+SET TOTAL_SHARDS=%RULES_RUST_TEST_TOTAL_SHARDS%
+IF "%TOTAL_SHARDS%"=="" SET TOTAL_SHARDS=%TEST_TOTAL_SHARDS%
+SET SHARD_INDEX=%RULES_RUST_TEST_SHARD_INDEX%
+IF "%SHARD_INDEX%"=="" SET SHARD_INDEX=%TEST_SHARD_INDEX%
+
@REM If sharding is not enabled, run test binary directly
-IF "%TEST_TOTAL_SHARDS%"=="" (
+IF "%TOTAL_SHARDS%"=="" (
!TEST_BINARY_PATH! %*
EXIT /B !ERRORLEVEL!
)
+IF "%TOTAL_SHARDS%"=="0" (
+ !TEST_BINARY_PATH! %*
+ EXIT /B !ERRORLEVEL!
+)
+
+IF "%SHARD_INDEX%"=="" (
+ ECHO ERROR: TEST_SHARD_INDEX or RULES_RUST_TEST_SHARD_INDEX must be set when sharding is enabled
+ EXIT /B 1
+)
@REM Touch status file to advertise sharding support to Bazel
-IF NOT "%TEST_SHARD_STATUS_FILE%"=="" (
+IF NOT "%TEST_SHARD_STATUS_FILE%"=="" IF NOT "%TEST_TOTAL_SHARDS%"=="" IF NOT "%TEST_TOTAL_SHARDS%"=="0" (
TYPE NUL > "%TEST_SHARD_STATUS_FILE%"
)
-@REM Create a temporary file for test list
-SET TEMP_LIST=%TEMP%\rust_test_list_%RANDOM%.txt
+@REM Create per-wrapper temporary files. Prefer Bazel's per-test temp directory;
+@REM when falling back to the shared temp directory, avoid %RANDOM%-only file
+@REM names that can collide across concurrently running Windows test shards.
+SET "TEMP_ROOT=%TEST_TMPDIR%"
+IF NOT DEFINED TEMP_ROOT SET "TEMP_ROOT=%TEMP%"
+IF NOT DEFINED TEMP_ROOT SET "TEMP_ROOT=."
+:CREATE_TEMP_DIR
+SET "TEMP_DIR=!TEMP_ROOT!\rust_test_sharding_!RANDOM!_!RANDOM!_!RANDOM!"
+MKDIR "!TEMP_DIR!" 2>NUL
+IF ERRORLEVEL 1 GOTO :CREATE_TEMP_DIR
+SET "TEMP_LIST=!TEMP_DIR!\list.txt"
+SET "TEMP_SHARD_LIST=!TEMP_DIR!\shard.txt"
@REM Enumerate all tests using libtest's --list flag
!TEST_BINARY_PATH! --list --format terse 2>NUL > "!TEMP_LIST!"
+IF ERRORLEVEL 1 (
+ RMDIR /S /Q "!TEMP_DIR!" 2>NUL
+ EXIT /B 1
+)
-@REM Count tests and filter for this shard
-SET INDEX=0
+@REM Sort tests by ordinal name and filter this shard by stable FNV-1a hash so
+@REM adding or removing one test does not move unrelated tests between shards.
+@REM In the PowerShell fragment below, 2166136261 is the 32-bit FNV offset basis,
+@REM 16777619 is the FNV prime, and 4294967295 is the UInt32 mask. Use decimal
+@REM constants because Windows PowerShell can interpret 0xffffffff as -1.
+powershell.exe -NoProfile -ExecutionPolicy Bypass -Command ^
+ "$ErrorActionPreference = 'Stop';" ^
+ "$tests = @(Get-Content -LiteralPath $env:TEMP_LIST | Where-Object { $_.EndsWith(': test') } | ForEach-Object { $_.Substring(0, $_.Length - 6) });" ^
+ "[Array]::Sort($tests, [StringComparer]::Ordinal);" ^
+ "$totalShards = [uint32]$env:TOTAL_SHARDS; $shardIndex = [uint32]$env:SHARD_INDEX;" ^
+ "$fnvPrime = [uint64]16777619; $u32Mask = [uint64]4294967295;" ^
+ "foreach ($test in $tests) { $hash = [uint32]2166136261; foreach ($byte in [Text.Encoding]::UTF8.GetBytes($test)) { $hash = [uint32](([uint64]($hash -bxor $byte) * $fnvPrime) -band $u32Mask) }; if (($hash %% $totalShards) -eq $shardIndex) { $test } }" ^
+ > "!TEMP_SHARD_LIST!"
+IF ERRORLEVEL 1 (
+ RMDIR /S /Q "!TEMP_DIR!" 2>NUL
+ EXIT /B 1
+)
+
SET SHARD_TESTS=
-FOR /F "tokens=1 delims=:" %%T IN ('TYPE "!TEMP_LIST!" ^| FINDSTR /E ": test"') DO (
- SET /A MOD=!INDEX! %% %TEST_TOTAL_SHARDS%
- IF !MOD! EQU %TEST_SHARD_INDEX% (
- IF "!SHARD_TESTS!"=="" (
- SET SHARD_TESTS=%%T
- ) ELSE (
- SET SHARD_TESTS=!SHARD_TESTS! %%T
- )
+FOR /F "usebackq delims=" %%T IN ("!TEMP_SHARD_LIST!") DO (
+ IF "!SHARD_TESTS!"=="" (
+ SET SHARD_TESTS=%%T
+ ) ELSE (
+ SET SHARD_TESTS=!SHARD_TESTS! %%T
)
- SET /A INDEX=!INDEX! + 1
)
-DEL "!TEMP_LIST!" 2>NUL
+RMDIR /S /Q "!TEMP_DIR!" 2>NUL
@REM If no tests for this shard, exit successfully
IF "!SHARD_TESTS!"=="" (
diff --git a/rust/private/test_sharding_wrapper.sh b/rust/private/test_sharding_wrapper.sh
index e05970ba0..b1f0fb55d 100755
--- a/rust/private/test_sharding_wrapper.sh
+++ b/rust/private/test_sharding_wrapper.sh
@@ -15,40 +15,70 @@
# Wrapper script for rust_test that enables Bazel test sharding support.
# This script intercepts test execution, enumerates tests using libtest's
-# --list flag, partitions them by shard index, and runs only the relevant subset.
+# --list flag, partitions them by stable test-name hash, and runs only the
+# relevant subset.
set -euo pipefail
TEST_BINARY="{{TEST_BINARY}}"
+# Native Bazel test sharding sets TEST_TOTAL_SHARDS/TEST_SHARD_INDEX. Explicit
+# shard test targets can set RULES_RUST_TEST_TOTAL_SHARDS/RULES_RUST_TEST_SHARD_INDEX
+# instead because Bazel may reserve TEST_* variables for its own test runner env.
+TOTAL_SHARDS="${RULES_RUST_TEST_TOTAL_SHARDS:-${TEST_TOTAL_SHARDS:-}}"
+SHARD_INDEX="${RULES_RUST_TEST_SHARD_INDEX:-${TEST_SHARD_INDEX:-}}"
+
+test_shard_index() {
+ local test_name="$1"
+ # FNV-1a 32-bit hash. The initial value is the FNV offset basis, and
+ # 16777619 is the FNV prime. This gives a stable, cheap string hash without
+ # depending on platform-specific tools being present in the test sandbox.
+ local hash=2166136261
+ local byte
+ local char
+ local i
+ local LC_ALL=C
+
+ for ((i = 0; i < ${#test_name}; i++)); do
+ char="${test_name:i:1}"
+ printf -v byte "%d" "'$char"
+ hash=$(( ((hash ^ byte) * 16777619) & 0xffffffff ))
+ done
+
+ echo $(( hash % TOTAL_SHARDS ))
+}
# If sharding is not enabled, run test binary directly
-if [[ -z "${TEST_TOTAL_SHARDS:-}" ]]; then
+if [[ -z "${TOTAL_SHARDS}" || "${TOTAL_SHARDS}" == "0" ]]; then
exec "./${TEST_BINARY}" "$@"
fi
+if [[ -z "${SHARD_INDEX}" ]]; then
+ echo "TEST_SHARD_INDEX or RULES_RUST_TEST_SHARD_INDEX must be set when sharding is enabled" >&2
+ exit 1
+fi
+
# Touch status file to advertise sharding support to Bazel
-if [[ -n "${TEST_SHARD_STATUS_FILE:-}" ]]; then
+if [[ -n "${TEST_SHARD_STATUS_FILE:-}" && "${TEST_TOTAL_SHARDS:-0}" != "0" ]]; then
touch "${TEST_SHARD_STATUS_FILE}"
fi
-# Enumerate all tests using libtest's --list flag
+# Enumerate all tests using libtest's --list flag. Sort the list so execution
+# order does not depend on libtest's output order.
# Output format: "test_name: test" - we need to strip the ": test" suffix
-test_list=$("./${TEST_BINARY}" --list --format terse 2>/dev/null | grep ': test$' | sed 's/: test$//' || true)
+test_list=$("./${TEST_BINARY}" --list --format terse 2>/dev/null | grep ': test$' | sed 's/: test$//' | LC_ALL=C sort || true)
# If no tests found, exit successfully
if [[ -z "$test_list" ]]; then
exit 0
fi
-# Filter tests for this shard
-# test_index % TEST_TOTAL_SHARDS == TEST_SHARD_INDEX
+# Filter tests for this shard. Use a stable name hash instead of list position
+# so adding or removing one test does not move unrelated tests between shards.
shard_tests=()
-index=0
while IFS= read -r test_name; do
- if (( index % TEST_TOTAL_SHARDS == TEST_SHARD_INDEX )); then
+ if (( $(test_shard_index "$test_name") == SHARD_INDEX )); then
shard_tests+=("$test_name")
fi
- ((index++)) || true
done <<< "$test_list"
# If no tests for this shard, exit successfully

View File

@@ -4,7 +4,90 @@
# The toolchain sysroot must therefore carry both stdlib trees so rustc can
# resolve the correct one for each `--target`.
diff --git a/rust/private/repository_utils.bzl b/rust/private/repository_utils.bzl
index ad8aea481..341ed70bb 100644
--- a/rust/private/repository_utils.bzl
+++ b/rust/private/repository_utils.bzl
@@ -342,6 +342,7 @@ rust_toolchain(
name = "{toolchain_name}",
rust_doc = "//:rustdoc",
rust_std = "//:rust_std-{target_triple}",
+ exec_rust_std = {exec_rust_std_label},
rustc = "//:rustc",
linker = {linker_label},
linker_type = {linker_type},
@@ -389,6 +390,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools = False,
include_linker = False,
include_objcopy = False,
+ exec_rust_std_label = None,
stdlib_linkflags = None,
extra_rustc_flags = None,
extra_exec_rustc_flags = None,
@@ -412,6 +414,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools (bool): Whether llvm-tools are present in the toolchain.
include_linker (bool): Whether a linker is available in the toolchain.
include_objcopy (bool): Whether rust-objcopy is available in the toolchain.
+ exec_rust_std_label (str, optional): Label for an exec-side stdlib when it differs from rust_std.
stdlib_linkflags (list, optional): Overridden flags needed for linking to rust
stdlib, akin to BAZEL_LINKLIBS. Defaults to
None.
@@ -465,6 +468,7 @@ def BUILD_for_rust_toolchain(
staticlib_ext = system_to_staticlib_ext(target_triple.system),
dylib_ext = system_to_dylib_ext(target_triple.system),
allocator_library = repr(allocator_library_label),
+ exec_rust_std_label = repr(exec_rust_std_label),
global_allocator_library = repr(global_allocator_library_label),
stdlib_linkflags = stdlib_linkflags,
default_edition = default_edition,
diff --git a/rust/repositories.bzl b/rust/repositories.bzl
index e4bd37f4c..66bd95bde 100644
--- a/rust/repositories.bzl
+++ b/rust/repositories.bzl
@@ -574,6 +574,18 @@ def _rust_toolchain_tools_repository_impl(ctx):
build_components.append(rust_stdlib_content)
sha256s.update(rust_stdlib_sha256)
+ exec_rust_std_label = None
+ if exec_triple.str != target_triple.str:
+ exec_rust_stdlib_content, exec_rust_stdlib_sha256 = load_rust_stdlib(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ build_components.append(exec_rust_stdlib_content)
+ sha256s.update(exec_rust_stdlib_sha256)
+ exec_rust_std_label = "//:rust_std-{}".format(exec_triple.str)
+
stdlib_linkflags = None
if "BAZEL_RUST_STDLIB_LINKFLAGS" in ctx.os.environ:
stdlib_linkflags = ctx.os.environ["BAZEL_RUST_STDLIB_LINKFLAGS"].split(":")
@@ -590,6 +602,7 @@ def _rust_toolchain_tools_repository_impl(ctx):
include_llvm_tools = include_llvm_tools,
include_linker = include_linker,
include_objcopy = include_objcopy,
+ exec_rust_std_label = exec_rust_std_label,
extra_rustc_flags = ctx.attr.extra_rustc_flags,
extra_exec_rustc_flags = ctx.attr.extra_exec_rustc_flags,
opt_level = ctx.attr.opt_level if ctx.attr.opt_level else None,
@@ -608,6 +621,14 @@ def _rust_toolchain_tools_repository_impl(ctx):
iso_date = iso_date,
)
sha256s.update(rustc_dev_sha256)
+ if exec_triple.str != target_triple.str:
+ exec_rustc_dev_sha256 = load_rustc_dev_nightly(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ sha256s.update(exec_rustc_dev_sha256)
ctx.file("WORKSPACE.bazel", """workspace(name = "{}")""".format(
ctx.name,
diff --git a/rust/toolchain.bzl b/rust/toolchain.bzl
index 10465b5a7..6e322535c 100644
--- a/rust/toolchain.bzl
+++ b/rust/toolchain.bzl
@@ -209,6 +209,7 @@ def _generate_sysroot(
@@ -15,16 +98,15 @@ diff --git a/rust/toolchain.bzl b/rust/toolchain.bzl
rust_std = None,
rustfmt = None,
linker = None):
@@ -312,7 +313,15 @@ def _generate_sysroot(
@@ -313,6 +314,14 @@ def _generate_sysroot(
# Made available to support $(location) expansion in stdlib_linkflags and extra_rustc_flags.
transitive_file_sets.append(depset(ctx.files.rust_std))
+
+ sysroot_exec_rust_std = None
+ if exec_rust_std:
+ sysroot_exec_rust_std = _symlink_sysroot_tree(ctx, name, exec_rust_std)
+ transitive_file_sets.extend([sysroot_exec_rust_std])
+
+ # Made available to support $(location) expansion in extra_exec_rustc_flags.
+ transitive_file_sets.append(depset(ctx.files.exec_rust_std))
+
@@ -71,7 +153,7 @@ diff --git a/rust/toolchain.bzl b/rust/toolchain.bzl
linking_context = cc_common.create_linking_context(
linker_inputs = depset([
@@ -793,6 +806,10 @@ rust_toolchain = rule(
@@ -804,6 +817,10 @@ rust_toolchain = rule(
doc = "The Rust standard library.",
mandatory = True,
),
@@ -82,100 +164,3 @@ diff --git a/rust/toolchain.bzl b/rust/toolchain.bzl
"rustc": attr.label(
doc = "The location of the `rustc` binary. Can be a direct source or a filegroup containing one item.",
allow_single_file = True,
diff --git a/rust/private/repository_utils.bzl b/rust/private/repository_utils.bzl
--- a/rust/private/repository_utils.bzl
+++ b/rust/private/repository_utils.bzl
@@ -341,6 +341,7 @@ rust_toolchain(
name = "{toolchain_name}",
rust_doc = "//:rustdoc",
rust_std = "//:rust_std-{target_triple}",
+ exec_rust_std = {exec_rust_std_label},
rustc = "//:rustc",
linker = {linker_label},
linker_type = {linker_type},
@@ -384,6 +385,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools,
include_linker,
include_objcopy = False,
+ exec_rust_std_label = None,
stdlib_linkflags = None,
extra_rustc_flags = None,
extra_exec_rustc_flags = None,
@@ -405,6 +407,7 @@ def BUILD_for_rust_toolchain(
include_llvm_tools (bool): Whether llvm-tools are present in the toolchain.
include_linker (bool): Whether a linker is available in the toolchain.
include_objcopy (bool): Whether rust-objcopy is available in the toolchain.
+ exec_rust_std_label (str, optional): Label for an exec-side stdlib when it differs from rust_std.
stdlib_linkflags (list, optional): Overridden flags needed for linking to rust
stdlib, akin to BAZEL_LINKLIBS. Defaults to
None.
@@ -453,6 +456,7 @@ def BUILD_for_rust_toolchain(
staticlib_ext = system_to_staticlib_ext(target_triple.system),
dylib_ext = system_to_dylib_ext(target_triple.system),
allocator_library = repr(allocator_library_label),
+ exec_rust_std_label = repr(exec_rust_std_label),
global_allocator_library = repr(global_allocator_library_label),
stdlib_linkflags = stdlib_linkflags,
default_edition = default_edition,
diff --git a/rust/private/rustc.bzl b/rust/private/rustc.bzl
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -1011,7 +1011,10 @@ def construct_arguments(
if build_metadata and not use_json_output:
fail("build_metadata requires parse_json_output")
- use_exec_target = is_exec_configuration(ctx) and crate_info.type == "bin"
+ # Exec-configuration crates (build scripts, proc-macros, and their
+ # dependencies) must all target the exec triple so they can link against
+ # each other and the exec-side standard library.
+ use_exec_target = is_exec_configuration(ctx)
output_dir = getattr(crate_info.output, "dirname", None)
linker_script = getattr(file, "linker_script", None)
diff --git a/rust/repositories.bzl b/rust/repositories.bzl
--- a/rust/repositories.bzl
+++ b/rust/repositories.bzl
@@ -536,6 +536,18 @@ def _rust_toolchain_tools_repository_impl(ctx):
build_components.append(rust_stdlib_content)
sha256s.update(rust_stdlib_sha256)
+ exec_rust_std_label = None
+ if exec_triple.str != target_triple.str:
+ exec_rust_stdlib_content, exec_rust_stdlib_sha256 = load_rust_stdlib(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ build_components.append(exec_rust_stdlib_content)
+ sha256s.update(exec_rust_stdlib_sha256)
+ exec_rust_std_label = "//:rust_std-{}".format(exec_triple.str)
+
stdlib_linkflags = None
if "BAZEL_RUST_STDLIB_LINKFLAGS" in ctx.os.environ:
stdlib_linkflags = ctx.os.environ["BAZEL_RUST_STDLIB_LINKFLAGS"].split(":")
@@ -552,6 +564,7 @@ def _rust_toolchain_tools_repository_impl(ctx):
include_llvm_tools = include_llvm_tools,
include_linker = include_linker,
include_objcopy = include_objcopy,
+ exec_rust_std_label = exec_rust_std_label,
extra_rustc_flags = ctx.attr.extra_rustc_flags,
extra_exec_rustc_flags = ctx.attr.extra_exec_rustc_flags,
opt_level = ctx.attr.opt_level if ctx.attr.opt_level else None,
@@ -575,8 +588,16 @@ def _rust_toolchain_tools_repository_impl(ctx):
if ctx.attr.dev_components:
rustc_dev_sha256 = load_rustc_dev_nightly(
ctx = ctx,
target_triple = target_triple,
version = version,
iso_date = iso_date,
)
sha256s.update(rustc_dev_sha256)
+ if exec_triple.str != target_triple.str:
+ exec_rustc_dev_sha256 = load_rustc_dev_nightly(
+ ctx = ctx,
+ target_triple = exec_triple,
+ version = version,
+ iso_date = iso_date,
+ )
+ sha256s.update(exec_rustc_dev_sha256)

View File

@@ -1,9 +1,11 @@
diff --git a/rust/private/rustc.bzl b/rust/private/rustc.bzl
index 11d97aaa3..ef6ebd61d 100644
--- a/rust/private/rustc.bzl
+++ b/rust/private/rustc.bzl
@@ -501,11 +501,41 @@
@@ -501,11 +501,25 @@ def get_linker_and_args(ctx, crate_type, toolchain, cc_toolchain, feature_config
filtered_args.append(version)
# Keep library search path flags
+ elif processed_arg == "-L" and i + 1 < len(link_args):
+ path = link_args[i + 1]
+ if ld_is_direct_driver and toolchain.target_os == "windows":
@@ -26,37 +28,48 @@
filtered_args.append(processed_arg)
if processed_arg == "--sysroot" and i + 1 < len(link_args):
# Two-part argument, keep the next arg too
@@ -2305,7 +2335,7 @@
return crate.metadata.dirname
return crate.output.dirname
@@ -2265,8 +2279,10 @@ def portable_link_flags(
use_pic,
ambiguous_libs,
get_lib_name,
+ for_windows = False,
for_darwin = False,
- flavor_msvc = False):
+ flavor_msvc = False,
+ use_direct_driver = False):
"""_summary_
-def _portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, for_windows = False, for_darwin = False, flavor_msvc = False):
+def _portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, for_windows = False, for_darwin = False, flavor_msvc = False, use_direct_driver = False):
artifact = get_preferred_artifact(lib, use_pic)
if ambiguous_libs and artifact.path in ambiguous_libs:
artifact = ambiguous_libs[artifact.path]
@@ -2344,6 +2344,11 @@
artifact.basename.startswith("test-") or artifact.basename.startswith("std-")
Args:
@@ -2319,6 +2335,11 @@ def portable_link_flags(
):
return [] if for_darwin else ["-lstatic=%s" % get_lib_name(artifact)]
+
+ if for_windows and use_direct_driver and not artifact.basename.endswith(".lib"):
+ return [
+ "-Clink-arg={}".format(artifact.path),
+ ]
+
if flavor_msvc:
return [
@@ -2381,7 +2386,7 @@
"-lstatic=%s" % get_lib_name(artifact),
@@ -2355,7 +2376,15 @@ def _make_link_flags_windows(make_link_flags_args, flavor_msvc, use_direct_drive
])
elif include_link_flags:
get_lib_name = get_lib_name_for_windows if flavor_msvc else get_lib_name_default
- ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, flavor_msvc = flavor_msvc))
+ ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, flavor_msvc = flavor_msvc, use_direct_driver = use_direct_driver))
- ret.extend(portable_link_flags(lib, use_pic, ambiguous_libs, get_lib_name, flavor_msvc = flavor_msvc))
+ ret.extend(portable_link_flags(
+ lib,
+ use_pic,
+ ambiguous_libs,
+ get_lib_name,
+ for_windows = True,
+ flavor_msvc = flavor_msvc,
+ use_direct_driver = use_direct_driver,
+ ))
# Windows toolchains can inherit POSIX defaults like -pthread from C deps,
# which fails to link with the MinGW/LLD toolchain. Drop them here.
@@ -2453,14 +2483,21 @@
@@ -2531,14 +2560,21 @@ def _add_native_link_flags(
else:
# For all other crate types we want to link C++ runtime library statically
# (for example libstdc++.a or libc++.a).
@@ -74,16 +87,9 @@
format_each = "-Lnative=%s",
)
if include_link_flags:
- args.add_all(
args.add_all(
- cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration),
- map_each = get_lib_name,
- format_each = "-lstatic=%s",
- )
+ args.add_all(
+ runtime_libs,
+ map_each = get_lib_name,
+ format_each = "-lstatic=%s",
+ )
def _get_dirname(file):
"""A helper function for `_add_native_link_flags`.
map_each = get_lib_name,
format_each = "-lstatic=%s",
)

View File

@@ -0,0 +1,47 @@
@echo off
REM Test-shard launcher template: resolves the shared test binary from Bazel
REM runfiles and forwards all command-line arguments to it. __TEST_BIN__ is
REM substituted with the binary's logical runfiles path when this template is
REM expanded.
setlocal EnableExtensions EnableDelayedExpansion
REM Resolve the binary into %test_bin%, then run it with the caller's args and
REM propagate its exit code.
call :resolve_runfile test_bin "__TEST_BIN__"
if errorlevel 1 exit /b 1
"%test_bin%" %*
exit /b %ERRORLEVEL%
REM resolve_runfile OUT_VAR LOGICAL_PATH
REM Sets OUT_VAR to the on-disk location of LOGICAL_PATH. Probes the runfiles
REM directories first, then falls back to a runfiles manifest lookup. Exits
REM the subroutine with code 1 when the runfile cannot be found.
:resolve_runfile
setlocal EnableExtensions EnableDelayedExpansion
set "logical_path=%~2"
REM Runfiles may be rooted under the test workspace name; precompute that
REM prefixed variant of the logical path as well.
set "workspace_logical_path=%logical_path%"
if defined TEST_WORKSPACE set "workspace_logical_path=%TEST_WORKSPACE%/%logical_path%"
REM Directory existence checks on Windows need backslash-separated paths.
set "native_logical_path=%logical_path:/=\%"
set "native_workspace_logical_path=%workspace_logical_path:/=\%"
REM Probe each candidate runfiles root, with and without the workspace prefix.
REM The one-line endlocal-then-set sequences below deliberately carry the
REM resolved value across the setlocal boundary back to the caller's OUT_VAR.
for %%R in ("%RUNFILES_DIR%" "%TEST_SRCDIR%") do (
set "runfiles_root=%%~R"
if defined runfiles_root (
if exist "!runfiles_root!\!native_logical_path!" (
endlocal & set "%~1=!runfiles_root!\!native_logical_path!" & exit /b 0
)
if exist "!runfiles_root!\!native_workspace_logical_path!" (
endlocal & set "%~1=!runfiles_root!\!native_workspace_logical_path!" & exit /b 0
)
)
)
REM No runfiles directory layout found. Fall back to a runfiles manifest,
REM checking the standard locations next to this launcher when the
REM RUNFILES_MANIFEST_FILE environment variable is unset.
set "manifest=%RUNFILES_MANIFEST_FILE%"
if not defined manifest if exist "%~f0.runfiles_manifest" set "manifest=%~f0.runfiles_manifest"
if not defined manifest if exist "%~dpn0.runfiles_manifest" set "manifest=%~dpn0.runfiles_manifest"
if not defined manifest if exist "%~f0.exe.runfiles_manifest" set "manifest=%~f0.exe.runfiles_manifest"
REM Manifest lines map a logical path to an absolute path, separated by a
REM space; return the mapped path for either logical-path variant.
if defined manifest if exist "%manifest%" (
for /f "usebackq tokens=1,* delims= " %%A in ("%manifest%") do (
if "%%A"=="%logical_path%" (
endlocal & set "%~1=%%B" & exit /b 0
)
if "%%A"=="%workspace_logical_path%" (
endlocal & set "%~1=%%B" & exit /b 0
)
)
)
>&2 echo failed to resolve runfile: %logical_path%
endlocal & exit /b 1

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Test-shard launcher template: locate the shared test binary in the Bazel
# runfiles tree and hand over execution to it with all original arguments.
# __TEST_BIN__ is substituted with the binary's logical runfiles path when
# this template is expanded.
set -euo pipefail

# resolve_runfile LOGICAL_PATH
# Prints the on-disk location of LOGICAL_PATH. Probes the runfiles
# directories first (with and without the workspace-name prefix), then falls
# back to a runfiles manifest lookup. Returns 1 when nothing matches.
resolve_runfile() {
  local logical="$1"
  local prefixed="${logical}"
  if [[ -n "${TEST_WORKSPACE:-}" ]]; then
    prefixed="${TEST_WORKSPACE}/${logical}"
  fi

  local root
  for root in "${RUNFILES_DIR:-}" "${TEST_SRCDIR:-}"; do
    if [[ -z "${root}" ]]; then
      continue
    fi
    if [[ -e "${root}/${logical}" ]]; then
      printf '%s\n' "${root}/${logical}"
      return 0
    fi
    if [[ -e "${root}/${prefixed}" ]]; then
      printf '%s\n' "${root}/${prefixed}"
      return 0
    fi
  done

  # Manifest fallback: prefer the env var, otherwise look for a manifest
  # sitting next to this launcher script.
  local manifest_file="${RUNFILES_MANIFEST_FILE:-}"
  if [[ -z "${manifest_file}" ]]; then
    if [[ -f "$0.runfiles_manifest" ]]; then
      manifest_file="$0.runfiles_manifest"
    elif [[ -f "$0.exe.runfiles_manifest" ]]; then
      manifest_file="$0.exe.runfiles_manifest"
    fi
  fi
  if [[ -n "${manifest_file}" && -f "${manifest_file}" ]]; then
    # Manifest lines map "logical_path actual_path"; try the bare logical
    # path first, then the workspace-prefixed variant.
    local hit=""
    hit="$(awk -v key="${logical}" '$1 == key { $1 = ""; sub(/^ /, ""); print; exit }' "${manifest_file}")"
    if [[ -z "${hit}" ]]; then
      hit="$(awk -v key="${prefixed}" '$1 == key { $1 = ""; sub(/^ /, ""); print; exit }' "${manifest_file}")"
    fi
    if [[ -n "${hit}" ]]; then
      printf '%s\n' "${hit}"
      return 0
    fi
  fi

  echo "failed to resolve runfile: $logical" >&2
  return 1
}

resolved_test_bin="$(resolve_runfile "__TEST_BIN__")"
exec "${resolved_test_bin}" "$@"