codex/sdk/python/tests/test_app_server_inputs.py
Ahmed Ibrahim 3e10e09e24 [7/8] Add Python SDK app-server integration harness (#22014)
## Why

The SDK had behavioral tests that replaced SDK client internals. Those
tests could catch wrapper mistakes, but they could not prove that the
pinned app-server runtime, the generated notification models, request
routing, and the sync/async public clients actually worked together.

This PR adds deterministic integration coverage that starts the pinned
`codex app-server` process and mocks only the upstream Responses HTTP
boundary.

## What

- Add `AppServerHarness` and `MockResponsesServer` helpers for isolated
`CODEX_HOME`, mock-provider config, queued SSE responses, and captured
`/v1/responses` requests (see the usage sketch after this list).
- Add shared helpers for SSE construction, stream assertions,
approval-policy inspection, and image fixtures.
- Split integration coverage into focused modules for run behavior,
inputs, streaming, turn controls, approvals, and thread lifecycle.
- Cover sync and async `Thread.run`, `TurnHandle.stream`, interleaved
streams, approval-mode persistence, lifecycle helpers, final-answer
phase handling, image inputs, loaded skill input injection, steering,
interruption, listing, history reads, run overrides, and token usage
mapping.
- Replace public-wrapper tests that duplicated integration-test behavior,
keeping lower-level client tests only where direct client behavior is
itself the thing under test.
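
Every test added here follows the same harness pattern. The condensed sketch below is lifted from `test_app_server_inputs.py`, so the helper names (`enqueue_assistant_message`, `app_server_config`, `single_request`, `message_input_texts`) are real; the prompt text and response id are placeholders.

```python
# Condensed harness pattern from test_app_server_inputs.py; `tmp_path` is pytest's fixture.
from app_server_harness import AppServerHarness
from openai_codex import Codex, TextInput


def test_sketch(tmp_path) -> None:
    with AppServerHarness(tmp_path) as harness:
        # Queue the assistant message the mock /v1/responses endpoint streams back.
        harness.responses.enqueue_assistant_message("hello", response_id="sketch")
        # Run one turn against the pinned app-server started by the harness.
        with Codex(config=harness.app_server_config()) as codex:
            result = codex.thread_start().run([TextInput("Say hello.")])
        # Inspect the single captured upstream Responses request.
        request = harness.responses.single_request()
        assert result.final_response == "hello"
        assert "Say hello." in request.message_input_texts("user")
```

Only the Responses HTTP boundary is mocked; everything between the public `Codex` client and that boundary runs against the real pinned app-server.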

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. This PR `[7/8]` Add Python SDK app-server integration harness
8. #22021 `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added pinned app-server integration tests under
`sdk/python/tests/test_app_server_*.py` and
`test_real_app_server_integration.py`.
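
A local run of this module might look like the following; the working directory (`sdk/python`) and the pytest options are illustrative assumptions, not something this PR prescribes.

```python
# Hypothetical local runner for just the input-focused integration tests.
# Assumes pytest is installed and the pinned `codex app-server` binary is available.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["tests/test_app_server_inputs.py", "-q"]))
```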

---------

Co-authored-by: Codex <noreply@openai.com>
2026-05-12 01:06:41 +03:00


from __future__ import annotations

from app_server_harness import AppServerHarness
from app_server_helpers import TINY_PNG_BYTES
from openai_codex import Codex, ImageInput, LocalImageInput, SkillInput, TextInput


def test_remote_image_input_reaches_responses_api(
    tmp_path,
) -> None:
    """Remote image inputs should survive the SDK and app-server boundary."""
    remote_image_url = "https://example.com/codex.png"
    with AppServerHarness(tmp_path) as harness:
        harness.responses.enqueue_assistant_message(
            "remote image received",
            response_id="remote-image",
        )
        with Codex(config=harness.app_server_config()) as codex:
            result = codex.thread_start().run(
                [
                    TextInput("Describe the remote image."),
                    ImageInput(remote_image_url),
                ]
            )
        request = harness.responses.single_request()
        assert {
            "final_response": result.final_response,
            "contains_user_prompt": "Describe the remote image."
            in request.message_input_texts("user"),
            "image_urls": request.message_image_urls("user"),
        } == {
            "final_response": "remote image received",
            "contains_user_prompt": True,
            "image_urls": [remote_image_url],
        }


def test_local_image_input_reaches_responses_api(
    tmp_path,
) -> None:
    """Local image inputs should become data URLs after crossing the app-server."""
    local_image = tmp_path / "local.png"
    local_image.write_bytes(TINY_PNG_BYTES)
    with AppServerHarness(tmp_path) as harness:
        harness.responses.enqueue_assistant_message(
            "local image received",
            response_id="local-image",
        )
        with Codex(config=harness.app_server_config()) as codex:
            result = codex.thread_start().run(
                [
                    TextInput("Describe the local image."),
                    LocalImageInput(str(local_image)),
                ]
            )
        request = harness.responses.single_request()
        assert {
            "final_response": result.final_response,
            "contains_user_prompt": "Describe the local image."
            in request.message_input_texts("user"),
            "image_url_is_png_data_url": request.message_image_urls("user")[-1].startswith(
                "data:image/png;base64,"
            ),
        } == {
            "final_response": "local image received",
            "contains_user_prompt": True,
            "image_url_is_png_data_url": True,
        }


def test_skill_input_injects_loaded_skill_body(tmp_path) -> None:
    """SkillInput should inject the selected loaded skill into model input."""
    skill_body = "Use the word cobalt."
    with AppServerHarness(tmp_path) as harness:
        skill_file = harness.workspace / ".agents" / "skills" / "demo" / "SKILL.md"
        skill_file.parent.mkdir(parents=True)
        skill_file.write_text(
            f"---\nname: demo\ndescription: demo skill\n---\n\n{skill_body}\n"
        )
        skill_path = skill_file.resolve()
        harness.responses.enqueue_assistant_message(
            "skill received",
            response_id="skill-input",
        )
        with Codex(config=harness.app_server_config()) as codex:
            result = codex.thread_start().run(
                [
                    TextInput("Use the selected skill."),
                    SkillInput("demo", str(skill_path)),
                ]
            )
        request = harness.responses.single_request()
        skill_blocks = [
            text
            for text in request.message_input_texts("user")
            if text.startswith("<skill>")
        ]
        assert {
            "final_response": result.final_response,
            "skill_blocks": [
                {
                    "has_name": "<name>demo</name>" in text,
                    "has_path": f"<path>{skill_path}</path>" in text,
                    "has_body": skill_body in text,
                }
                for text in skill_blocks
            ],
        } == {
            "final_response": "skill received",
            "skill_blocks": [
                {
                    "has_name": True,
                    "has_path": True,
                    "has_body": True,
                }
            ],
        }