mirror of
https://github.com/openai/codex.git
synced 2026-05-14 08:12:36 +00:00
## Why The Python SDK needs the same tight formatter/lint loop as the rest of the repo: a safe Ruff autofix pass, Ruff formatting, editor save behavior, and CI checks that catch drift. Without that loop, SDK changes can land with formatting or import ordering that differs from what reviewers and CI expect. ## What - Add Ruff configuration to `sdk/python/pyproject.toml`, excluding generated protocol code and notebooks from the normal lint/format pass. - Update `just fmt` so it still formats Rust and also runs Python SDK Ruff autofix and formatting. - Add Python SDK CI steps for `ruff check` and `ruff format --check` before pytest. - Recommend the Ruff VS Code extension and enable Python format/fix/organize-on-save so Cmd+S uses the same tooling. - Apply the resulting Ruff formatting to SDK Python files, examples, and the checked-in generated `v2_all.py` output emitted by the pinned generator. - Add a guard test for the `just fmt` recipe so it keeps working from both Rust and Python SDK working directories. ## Stack 1. #21891 `[1/8]` Pin Python SDK runtime dependency 2. #21893 `[2/8]` Generate Python SDK types from pinned runtime 3. #21895 `[3/8]` Run Python SDK tests in CI 4. #21896 `[4/8]` Define Python SDK public API surface 5. #21905 `[5/8]` Rename Python SDK package to `openai-codex` 6. #21910 `[6/8]` Add high-level Python SDK approval mode 7. #22014 `[7/8]` Add Python SDK app-server integration harness 8. This PR `[8/8]` Add Python SDK Ruff formatting ## Verification - Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the shared format recipe. - Ran `just fmt` after the recipe update. --------- Co-authored-by: Codex <noreply@openai.com>
101 lines
3.0 KiB
Python
101 lines
3.0 KiB
Python
# Bootstrap for running this example straight from a source checkout:
# the examples/ root must be importable before `_bootstrap` can be found,
# and the local SDK sources must be on sys.path before `openai_codex` is
# imported. Import order below is therefore deliberate.
import sys

from pathlib import Path

# parents[1] is presumably the examples/ root (this file one level below it)
# — the `_bootstrap` import relies on that layout; verify if the file moves.
_EXAMPLES_ROOT = Path(__file__).resolve().parents[1]

if str(_EXAMPLES_ROOT) not in sys.path:
    sys.path.insert(0, str(_EXAMPLES_ROOT))

# Resolvable only after the sys.path insertion above.
from _bootstrap import ensure_local_sdk_src, runtime_config

# Puts the in-repo SDK sources on sys.path so the imports below pick up the
# local package rather than an installed one.
ensure_local_sdk_src()

import asyncio

from openai_codex import (
    AsyncCodex,
    TextInput,
)
from openai_codex.types import (
    ThreadTokenUsageUpdatedNotification,
    TurnCompletedNotification,
)
|
|
|
|
|
|
def _status_value(status: object | None) -> str:
|
|
return str(getattr(status, "value", status))
|
|
|
|
|
|
def _format_usage(usage: object | None) -> str:
|
|
if usage is None:
|
|
return "usage> (none)"
|
|
|
|
last = getattr(usage, "last", None)
|
|
total = getattr(usage, "total", None)
|
|
if last is None or total is None:
|
|
return f"usage> {usage}"
|
|
|
|
return (
|
|
"usage>\n"
|
|
f" last: input={last.input_tokens} output={last.output_tokens} reasoning={last.reasoning_output_tokens} total={last.total_tokens} cached={last.cached_input_tokens}\n"
|
|
f" total: input={total.input_tokens} output={total.output_tokens} reasoning={total.reasoning_output_tokens} total={total.total_tokens} cached={total.cached_input_tokens}"
|
|
)
|
|
|
|
|
|
async def main() -> None:
    """Interactive mini CLI: send each input line to a Codex thread and
    stream the assistant's reply, then report status and token usage."""
    print("Codex async mini CLI. Type /exit to quit.")

    async with AsyncCodex(config=runtime_config()) as codex:
        thread = await codex.thread_start(
            model="gpt-5.4", config={"model_reasoning_effort": "high"}
        )
        print("Thread:", thread.id)

        while True:
            try:
                # input() blocks, so run it off the event loop.
                line = (await asyncio.to_thread(input, "you> ")).strip()
            except EOFError:
                break  # Ctrl-D ends the session.

            if not line:
                continue
            if line in {"/exit", "/quit"}:
                break

            turn = await thread.turn(TextInput(line))
            usage = status = error = None
            saw_text = False

            print("assistant> ", end="", flush=True)
            async for event in turn.stream():
                payload = event.payload
                if event.method == "item/agentMessage/delta":
                    # Stream assistant text as it arrives.
                    chunk = getattr(payload, "delta", "")
                    if chunk:
                        print(chunk, end="", flush=True)
                        saw_text = True
                elif isinstance(payload, ThreadTokenUsageUpdatedNotification):
                    usage = payload.token_usage
                elif isinstance(payload, TurnCompletedNotification):
                    status = payload.turn.status
                    error = payload.turn.error

            # Terminate the streamed line, or note that nothing was printed.
            if saw_text:
                print()
            else:
                print("[no text]")

            status_text = _status_value(status)
            print(f"assistant.status> {status_text}")
            if status_text == "failed":
                print("assistant.error>", error)

            print(_format_usage(usage))
|
|
|
|
|
|
# Script entry point: run the async REPL on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
|