[8/8] Add Python SDK Ruff formatting (#22021)

## Why

The Python SDK needs the same tight formatter/lint loop as the rest of
the repo: a safe Ruff autofix pass, Ruff formatting, editor save
behavior, and CI checks that catch drift. Without that loop, SDK changes
can land with formatting or import ordering that differs from what
reviewers and CI expect.

## What

- Add Ruff configuration to `sdk/python/pyproject.toml`, excluding
generated protocol code and notebooks from the normal lint/format pass.
- Update `just fmt` so it still formats Rust and also runs Python SDK
Ruff autofix and formatting.
- Add Python SDK CI steps for `ruff check` and `ruff format --check`
before pytest.
- Recommend the Ruff VS Code extension and enable Python
format/fix/organize-on-save so saving a file in the editor (e.g. Cmd+S)
runs the same tooling as `just fmt` and CI.
- Apply the resulting Ruff formatting to SDK Python files, examples, and
the checked-in generated `v2_all.py` output emitted by the pinned
generator.
- Add a guard test for the `just fmt` recipe so it keeps working from
both Rust and Python SDK working directories.

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. #22014 `[7/8]` Add Python SDK app-server integration harness
8. This PR `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the
shared format recipe.
- Ran `just fmt` after the recipe update to confirm it formats both the
Rust code and the Python SDK without producing further diffs.

---------

Co-authored-by: Codex <noreply@openai.com>
This commit is contained in:
Ahmed Ibrahim
2026-05-12 01:10:29 +03:00
committed by GitHub
parent 3e10e09e24
commit aa9e8f0262
51 changed files with 660 additions and 1163 deletions

View File

@@ -1,18 +1,4 @@
from .client import AppServerConfig
from .errors import (
AppServerError,
AppServerRpcError,
InternalRpcError,
InvalidParamsError,
InvalidRequestError,
JsonRpcError,
MethodNotFoundError,
ParseError,
RetryLimitExceededError,
ServerBusyError,
TransportClosedError,
is_retryable_error,
)
from ._version import __version__
from .api import (
ApprovalMode,
AsyncCodex,
@@ -30,8 +16,22 @@ from .api import (
Thread,
TurnHandle,
)
from .client import AppServerConfig
from .errors import (
AppServerError,
AppServerRpcError,
InternalRpcError,
InvalidParamsError,
InvalidRequestError,
JsonRpcError,
MethodNotFoundError,
ParseError,
RetryLimitExceededError,
ServerBusyError,
TransportClosedError,
is_retryable_error,
)
from .retry import retry_on_overload
from ._version import __version__
__all__ = [
"__version__",

View File

@@ -122,9 +122,7 @@ class MessageRouter:
if notification.method == "turn/completed":
self._pending_turn_notifications.pop(turn_id, None)
return
self._pending_turn_notifications.setdefault(turn_id, deque()).append(
notification
)
self._pending_turn_notifications.setdefault(turn_id, deque()).append(notification)
return
turn_queue.put(notification)

View File

@@ -1,8 +1,7 @@
from __future__ import annotations
import re
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as distribution_version
from importlib.metadata import PackageNotFoundError, version as distribution_version
from pathlib import Path
DISTRIBUTION_NAME = "openai-codex"

View File

@@ -5,6 +5,23 @@ from dataclasses import dataclass
from enum import Enum
from typing import AsyncIterator, Iterator, NoReturn
from ._inputs import (
ImageInput as ImageInput,
Input,
InputItem as InputItem,
LocalImageInput as LocalImageInput,
MentionInput as MentionInput,
RunInput,
SkillInput as SkillInput,
TextInput as TextInput,
_normalize_run_input,
_to_wire_input,
)
from ._run import (
RunResult,
_collect_async_run_result,
_collect_run_result,
)
from .async_client import AsyncAppServerClient
from .client import AppServerClient, AppServerConfig
from .generated.v2_all import (
@@ -30,8 +47,8 @@ from .generated.v2_all import (
ThreadSortKey,
ThreadSource,
ThreadSourceKind,
ThreadStartSource,
ThreadStartParams,
ThreadStartSource,
Turn as AppServerTurn,
TurnCompletedNotification,
TurnInterruptResponse,
@@ -39,23 +56,6 @@ from .generated.v2_all import (
TurnSteerResponse,
)
from .models import InitializeResponse, JsonObject, Notification, ServerInfo
from ._inputs import (
ImageInput as ImageInput,
Input,
InputItem as InputItem,
LocalImageInput as LocalImageInput,
MentionInput as MentionInput,
RunInput,
SkillInput as SkillInput,
TextInput as TextInput,
_normalize_run_input,
_to_wire_input,
)
from ._run import (
RunResult,
_collect_async_run_result,
_collect_run_result,
)
def _split_user_agent(user_agent: str) -> tuple[str | None, str | None]:
@@ -151,11 +151,7 @@ class Codex:
normalized_server_name = (server_name or "").strip()
normalized_server_version = (server_version or "").strip()
if (
not user_agent
or not normalized_server_name
or not normalized_server_version
):
if not user_agent or not normalized_server_name or not normalized_server_version:
raise RuntimeError(
"initialize response missing required metadata "
f"(user_agent={user_agent!r}, server_name={normalized_server_name!r}, server_version={normalized_server_version!r})"
@@ -262,9 +258,7 @@ class Codex:
sandbox: SandboxMode | None = None,
service_tier: str | None = None,
) -> Thread:
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadResumeParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -298,9 +292,7 @@ class Codex:
service_tier: str | None = None,
thread_source: ThreadSource | None = None,
) -> Thread:
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadForkParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -470,9 +462,7 @@ class AsyncCodex:
service_tier: str | None = None,
) -> AsyncThread:
await self._ensure_initialized()
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadResumeParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -507,9 +497,7 @@ class AsyncCodex:
thread_source: ThreadSource | None = None,
) -> AsyncThread:
await self._ensure_initialized()
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadForkParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -597,9 +585,7 @@ class Thread:
summary: ReasoningSummary | None = None,
) -> TurnHandle:
wire_input = _to_wire_input(input)
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = TurnStartParams(
thread_id=self.id,
input=wire_input,
@@ -683,9 +669,7 @@ class AsyncThread:
) -> AsyncTurnHandle:
await self._codex._ensure_initialized()
wire_input = _to_wire_input(input)
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = TurnStartParams(
thread_id=self.id,
input=wire_input,
@@ -711,9 +695,7 @@ class AsyncThread:
async def read(self, *, include_turns: bool = False) -> ThreadReadResponse:
await self._codex._ensure_initialized()
return await self._codex._client.thread_read(
self.id, include_turns=include_turns
)
return await self._codex._client.thread_read(self.id, include_turns=include_turns)
async def set_name(self, name: str) -> ThreadSetNameResponse:
await self._codex._ensure_initialized()
@@ -758,10 +740,7 @@ class TurnHandle:
try:
for event in stream:
payload = event.payload
if (
isinstance(payload, TurnCompletedNotification)
and payload.turn.id == self.id
):
if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id:
completed = payload
finally:
stream.close()
@@ -812,10 +791,7 @@ class AsyncTurnHandle:
try:
async for event in stream:
payload = event.payload
if (
isinstance(payload, TurnCompletedNotification)
and payload.turn.id == self.id
):
if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id:
completed = payload
finally:
await stream.aclose()

View File

@@ -127,9 +127,7 @@ class AsyncAppServerClient:
"""List threads using the wrapped sync client."""
return await self._call_sync(self._sync.thread_list, params)
async def thread_read(
self, thread_id: str, include_turns: bool = False
) -> ThreadReadResponse:
async def thread_read(self, thread_id: str, include_turns: bool = False) -> ThreadReadResponse:
"""Read a thread using the wrapped sync client."""
return await self._call_sync(self._sync.thread_read, thread_id, include_turns)
@@ -164,13 +162,9 @@ class AsyncAppServerClient:
params: V2TurnStartParams | JsonObject | None = None,
) -> TurnStartResponse:
"""Start a turn using the wrapped sync client."""
return await self._call_sync(
self._sync.turn_start, thread_id, input_items, params
)
return await self._call_sync(self._sync.turn_start, thread_id, input_items, params)
async def turn_interrupt(
self, thread_id: str, turn_id: str
) -> TurnInterruptResponse:
async def turn_interrupt(self, thread_id: str, turn_id: str) -> TurnInterruptResponse:
"""Interrupt a turn using the wrapped sync client."""
return await self._call_sync(self._sync.turn_interrupt, thread_id, turn_id)

View File

@@ -12,6 +12,8 @@ from typing import Callable, Iterator, TypeVar
from pydantic import BaseModel
from ._message_router import MessageRouter
from ._version import __version__ as SDK_VERSION
from .errors import AppServerError, TransportClosedError
from .generated.notification_registry import NOTIFICATION_MODELS
from .generated.v2_all import (
@@ -43,9 +45,7 @@ from .models import (
Notification,
UnknownNotification,
)
from ._message_router import MessageRouter
from .retry import retry_on_overload
from ._version import __version__ as SDK_VERSION
ModelT = TypeVar("ModelT", bound=BaseModel)
ApprovalHandler = Callable[[str, JsonObject | None], JsonObject]
@@ -76,9 +76,7 @@ def _params_dict(
return dumped
if isinstance(params, dict):
return params
raise TypeError(
f"Expected generated params model or dict, got {type(params).__name__}"
)
raise TypeError(f"Expected generated params model or dict, got {type(params).__name__}")
def _installed_codex_path() -> Path:
@@ -248,9 +246,7 @@ class AppServerClient:
waiter = self._router.create_response_waiter(request_id)
try:
self._write_message(
{"id": request_id, "method": method, "params": params or {}}
)
self._write_message({"id": request_id, "method": method, "params": params or {}})
except BaseException:
self._router.discard_response_waiter(request_id)
raise
@@ -293,20 +289,14 @@ class AppServerClient:
params: V2ThreadResumeParams | JsonObject | None = None,
) -> ThreadResumeResponse:
payload = {"threadId": thread_id, **_params_dict(params)}
return self.request(
"thread/resume", payload, response_model=ThreadResumeResponse
)
return self.request("thread/resume", payload, response_model=ThreadResumeResponse)
def thread_list(
self, params: V2ThreadListParams | JsonObject | None = None
) -> ThreadListResponse:
return self.request(
"thread/list", _params_dict(params), response_model=ThreadListResponse
)
return self.request("thread/list", _params_dict(params), response_model=ThreadListResponse)
def thread_read(
self, thread_id: str, include_turns: bool = False
) -> ThreadReadResponse:
def thread_read(self, thread_id: str, include_turns: bool = False) -> ThreadReadResponse:
return self.request(
"thread/read",
{"threadId": thread_id, "includeTurns": include_turns},
@@ -461,16 +451,12 @@ class AppServerClient:
model = NOTIFICATION_MODELS.get(method)
if model is None:
return Notification(
method=method, payload=UnknownNotification(params=params_dict)
)
return Notification(method=method, payload=UnknownNotification(params=params_dict))
try:
payload = model.model_validate(params_dict)
except Exception: # noqa: BLE001
return Notification(
method=method, payload=UnknownNotification(params=params_dict)
)
return Notification(method=method, payload=UnknownNotification(params=params_dict))
return Notification(method=method, payload=payload)
def _normalize_input_items(
@@ -483,9 +469,7 @@ class AppServerClient:
return [input_items]
return input_items
def _default_approval_handler(
self, method: str, params: JsonObject | None
) -> JsonObject:
def _default_approval_handler(self, method: str, params: JsonObject | None) -> JsonObject:
"""Accept approval requests when the caller did not provide a handler."""
if method == "item/commandExecution/requestApproval":
return {"decision": "accept"}

View File

@@ -66,11 +66,7 @@ def _is_server_overloaded(data: Any) -> bool:
return data.lower() == "server_overloaded"
if isinstance(data, dict):
direct = (
data.get("codex_error_info")
or data.get("codexErrorInfo")
or data.get("errorInfo")
)
direct = data.get("codex_error_info") or data.get("codexErrorInfo") or data.get("errorInfo")
if isinstance(direct, str) and direct.lower() == "server_overloaded":
return True
if isinstance(direct, dict):

File diff suppressed because it is too large Load Diff