Files
codex/sdk/python/tests/test_app_server_inputs.py
Ahmed Ibrahim aa9e8f0262 [8/8] Add Python SDK Ruff formatting (#22021)
## Why

The Python SDK needs the same tight formatter/lint loop as the rest of
the repo: a safe Ruff autofix pass, Ruff formatting, editor save
behavior, and CI checks that catch drift. Without that loop, SDK changes
can land with formatting or import ordering that differs from what
reviewers and CI expect.

## What

- Add Ruff configuration to `sdk/python/pyproject.toml`, excluding
generated protocol code and notebooks from the normal lint/format pass.
- Update `just fmt` so it still formats Rust and also runs Python SDK
Ruff autofix and formatting.
- Add Python SDK CI steps for `ruff check` and `ruff format --check`
before pytest.
- Recommend the Ruff VS Code extension and enable Python
format/fix/organize-on-save so Cmd+S uses the same tooling.
- Apply the resulting Ruff formatting to SDK Python files, examples, and
the checked-in generated `v2_all.py` output emitted by the pinned
generator.
- Add a guard test for the `just fmt` recipe so it keeps working from
both Rust and Python SDK working directories.

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. #22014 `[7/8]` Add Python SDK app-server integration harness
8. This PR `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the
shared format recipe.
- Ran `just fmt` after the recipe update.

---------

Co-authored-by: Codex <noreply@openai.com>
2026-05-12 01:10:29 +03:00

122 lines
4.1 KiB
Python

from __future__ import annotations
from app_server_harness import AppServerHarness
from app_server_helpers import TINY_PNG_BYTES
from openai_codex import Codex, ImageInput, LocalImageInput, SkillInput, TextInput
def test_remote_image_input_reaches_responses_api(
    tmp_path,
) -> None:
    """Remote image inputs should survive the SDK and app-server boundary."""
    image_url = "https://example.com/codex.png"
    with AppServerHarness(tmp_path) as server:
        # Queue the canned assistant reply the fake responses endpoint will serve.
        server.responses.enqueue_assistant_message(
            "remote image received",
            response_id="remote-image",
        )
        with Codex(config=server.app_server_config()) as client:
            run_result = client.thread_start().run(
                [
                    TextInput("Describe the remote image."),
                    ImageInput(image_url),
                ]
            )
        captured = server.responses.single_request()
        # Compare one observed snapshot against one expected snapshot so a
        # failure reports every divergent field at once.
        observed = {
            "final_response": run_result.final_response,
            "contains_user_prompt": "Describe the remote image."
            in captured.message_input_texts("user"),
            "image_urls": captured.message_image_urls("user"),
        }
        assert observed == {
            "final_response": "remote image received",
            "contains_user_prompt": True,
            "image_urls": [image_url],
        }
def test_local_image_input_reaches_responses_api(
    tmp_path,
) -> None:
    """Local image inputs should become data URLs after crossing the app-server."""
    png_path = tmp_path / "local.png"
    png_path.write_bytes(TINY_PNG_BYTES)
    with AppServerHarness(tmp_path) as server:
        # Queue the canned assistant reply the fake responses endpoint will serve.
        server.responses.enqueue_assistant_message(
            "local image received",
            response_id="local-image",
        )
        with Codex(config=server.app_server_config()) as client:
            run_result = client.thread_start().run(
                [
                    TextInput("Describe the local image."),
                    LocalImageInput(str(png_path)),
                ]
            )
        captured = server.responses.single_request()
        # The app-server is expected to inline the local file as a base64 PNG
        # data URL; check only the prefix rather than the full payload.
        last_image_url = captured.message_image_urls("user")[-1]
        observed = {
            "final_response": run_result.final_response,
            "contains_user_prompt": "Describe the local image."
            in captured.message_input_texts("user"),
            "image_url_is_png_data_url": last_image_url.startswith(
                "data:image/png;base64,"
            ),
        }
        assert observed == {
            "final_response": "local image received",
            "contains_user_prompt": True,
            "image_url_is_png_data_url": True,
        }
def test_skill_input_injects_loaded_skill_body(tmp_path) -> None:
    """SkillInput should inject the selected loaded skill into model input."""
    body_sentinel = "Use the word cobalt."
    with AppServerHarness(tmp_path) as server:
        # Lay out a minimal on-disk skill with YAML front matter so the
        # app-server's skill loader can discover and parse it.
        skill_md = server.workspace / ".agents" / "skills" / "demo" / "SKILL.md"
        skill_md.parent.mkdir(parents=True)
        skill_md.write_text(
            f"---\nname: demo\ndescription: demo skill\n---\n\n{body_sentinel}\n"
        )
        resolved_path = skill_md.resolve()
        server.responses.enqueue_assistant_message(
            "skill received",
            response_id="skill-input",
        )
        with Codex(config=server.app_server_config()) as client:
            run_result = client.thread_start().run(
                [
                    TextInput("Use the selected skill."),
                    SkillInput("demo", str(resolved_path)),
                ]
            )
        captured = server.responses.single_request()
        # Only the <skill>-wrapped chunks of the user input matter here.
        injected = [
            chunk
            for chunk in captured.message_input_texts("user")
            if chunk.startswith("<skill>")
        ]
        observed = {
            "final_response": run_result.final_response,
            "skill_blocks": [
                {
                    "has_name": "<name>demo</name>" in chunk,
                    "has_path": f"<path>{resolved_path}</path>" in chunk,
                    "has_body": body_sentinel in chunk,
                }
                for chunk in injected
            ],
        }
        # Exactly one skill block, carrying the name, resolved path, and body.
        assert observed == {
            "final_response": "skill received",
            "skill_blocks": [
                {
                    "has_name": True,
                    "has_path": True,
                    "has_body": True,
                }
            ],
        }