## Why

The Python SDK needs the same tight formatter/lint loop as the rest of the repo: a safe Ruff autofix pass, Ruff formatting, editor save behavior, and CI checks that catch drift. Without that loop, SDK changes can land with formatting or import ordering that differs from what reviewers and CI expect.

## What

- Add Ruff configuration to `sdk/python/pyproject.toml`, excluding generated protocol code and notebooks from the normal lint/format pass (a sketch of this kind of configuration follows below).
- Update `just fmt` so it still formats Rust and also runs Python SDK Ruff autofix and formatting.
- Add Python SDK CI steps for `ruff check` and `ruff format --check` before pytest.
- Recommend the Ruff VS Code extension and enable Python format/fix/organize-on-save so Cmd+S uses the same tooling.
- Apply the resulting Ruff formatting to SDK Python files, examples, and the checked-in generated `v2_all.py` output emitted by the pinned generator.
- Add a guard test for the `just fmt` recipe so it keeps working from both Rust and Python SDK working directories (a rough sketch follows below).

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. #22014 `[7/8]` Add Python SDK app-server integration harness
8. This PR `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the shared format recipe.
- Ran `just fmt` after the recipe update.

---------

Co-authored-by: Codex <noreply@openai.com>
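To make the first `What` bullet concrete, here is a minimal sketch of the kind of Ruff configuration it describes. The exact rule selection and exclude paths are assumptions for illustration, not the merged config:

```toml
# sdk/python/pyproject.toml (sketch; rule selection and paths are assumptions)
[tool.ruff]
# Keep generated protocol code and notebooks out of the normal lint/format pass.
extend-exclude = [
    "openai_codex/generated",
    "*.ipynb",
]

[tool.ruff.lint]
# Let Ruff own import ordering ("I" enables the isort-compatible rules), so
# `ruff check --fix` on save agrees with the CI `ruff check` step.
extend-select = ["I"]
```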
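And a rough sketch of the shape the guard test named in Verification could take, assuming it shells out to `just`; the file location, repo layout, and subprocess approach are all assumptions:

```python
# Sketch of a guard test for the shared `just fmt` recipe; the path math and
# the subprocess approach are assumptions, not the test that actually landed.
import subprocess
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parents[1]  # assumed repo layout


def test_root_fmt_recipe_formats_rust_and_python_sdk() -> None:
    """`just fmt` should succeed from both the Rust root and sdk/python."""
    for cwd in (REPO_ROOT, REPO_ROOT / "sdk" / "python"):
        # `just` searches upward for the justfile, so the recipe should
        # resolve from either working directory.
        result = subprocess.run(["just", "fmt"], cwd=cwd, capture_output=True)
        assert result.returncode == 0, result.stderr.decode()
```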
One of the affected test files, as formatted by Ruff (82 lines · 3.0 KiB · Python):
```python
from __future__ import annotations

from app_server_harness import AppServerHarness
from app_server_helpers import agent_message_texts, streaming_response

from openai_codex import Codex, TextInput
from openai_codex.generated.v2_all import TurnStatus


def test_turn_steer_adds_follow_up_input(tmp_path) -> None:
    """Steering an active turn should create a follow-up Responses request."""
    with AppServerHarness(tmp_path) as harness:
        harness.responses.enqueue_sse(
            streaming_response("steer-first", "msg-steer-first", ["before steer"]),
            delay_between_events_s=0.2,
        )
        harness.responses.enqueue_assistant_message(
            "after steer",
            response_id="steer-second",
        )

        with Codex(config=harness.app_server_config()) as codex:
            thread = codex.thread_start()
            turn = thread.turn(TextInput("Start a steerable turn."))
            harness.responses.wait_for_requests(1)
            steer = turn.steer(TextInput("Use this steering input."))
            events = list(turn.stream())
            requests = harness.responses.wait_for_requests(2)

        assert {
            "steered_turn_id": steer.turn_id,
            "turn_id": turn.id,
            "agent_messages": agent_message_texts(events),
            "last_user_texts": [request.message_input_texts("user")[-1] for request in requests],
        } == {
            "steered_turn_id": turn.id,
            "turn_id": turn.id,
            "agent_messages": ["before steer", "after steer"],
            "last_user_texts": [
                "Start a steerable turn.",
                "Use this steering input.",
            ],
        }


def test_turn_interrupt_stops_active_turn_and_follow_up_runs(tmp_path) -> None:
    """Interrupting an active turn should complete it and leave the thread usable."""
    with AppServerHarness(tmp_path) as harness:
        harness.responses.enqueue_sse(
            streaming_response(
                "interrupt-first",
                "msg-interrupt-first",
                ["still ", "running"],
            ),
            delay_between_events_s=0.2,
        )
        harness.responses.enqueue_assistant_message(
            "after interrupt",
            response_id="interrupt-follow-up",
        )

        with Codex(config=harness.app_server_config()) as codex:
            thread = codex.thread_start()
            interrupted_turn = thread.turn(TextInput("Start a long turn."))
            harness.responses.wait_for_requests(1)
            interrupt_response = interrupted_turn.interrupt()
            completed = interrupted_turn.run()
            follow_up = thread.run("Continue after the interrupt.")

        assert {
            "interrupt_response": interrupt_response.model_dump(
                by_alias=True,
                mode="json",
            ),
            "interrupted_status": completed.status,
            "follow_up": follow_up.final_response,
        } == {
            "interrupt_response": {},
            "interrupted_status": TurnStatus.interrupted,
            "follow_up": "after interrupt",
        }
```