mirror of
https://github.com/openai/codex.git
synced 2026-04-26 15:45:02 +00:00
## TL;DR (WIP, especially the examples) Thin the Python SDK public surface so the wrapper layer returns canonical app-server-generated models directly. - keeps `Codex` / `AsyncCodex` / `Thread` / `Turn` and the input helpers, but removes alias-only type layers and custom result models - `metadata` now returns `InitializeResponse`, and `run()` returns the generated app-server `Turn` - updates docs, examples, the notebook, and tests to use canonical generated types, and regenerates `v2_all.py` against the current schema - keeps the pinned runtime-package integration flow and real integration coverage ## Validation - `PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests` - `GH_TOKEN="$(gh auth token)" RUN_REAL_CODEX_TESTS=1 PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests -rs` --------- Co-authored-by: Codex <noreply@openai.com>
97 lines
3.0 KiB
Python
97 lines
3.0 KiB
Python
"""Interactive async mini CLI example for the Codex app-server SDK.

Bootstraps the local SDK sources onto ``sys.path`` before importing the
generated ``codex_app_server`` package, then runs a conversational
read/stream/print loop against a live Codex thread.
"""

import sys
from pathlib import Path

# Make the sibling `_bootstrap` helper importable when this example is run
# directly as a script (the examples directory is not an installed package).
_EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
    sys.path.insert(0, str(_EXAMPLES_ROOT))

from _bootstrap import ensure_local_sdk_src, runtime_config

# Must run before importing `codex_app_server` so the checked-out SDK sources
# take precedence over any installed copy of the package.
ensure_local_sdk_src()

import asyncio

from codex_app_server import (
    AsyncCodex,
    TextInput,
    ThreadTokenUsageUpdatedNotification,
    TurnCompletedNotification,
)
def _status_value(status: object | None) -> str:
|
|
return str(getattr(status, "value", status))
|
|
|
|
|
|
def _format_usage(usage: object | None) -> str:
|
|
if usage is None:
|
|
return "usage> (none)"
|
|
|
|
last = getattr(usage, "last", None)
|
|
total = getattr(usage, "total", None)
|
|
if last is None or total is None:
|
|
return f"usage> {usage}"
|
|
|
|
return (
|
|
"usage>\n"
|
|
f" last: input={last.input_tokens} output={last.output_tokens} reasoning={last.reasoning_output_tokens} total={last.total_tokens} cached={last.cached_input_tokens}\n"
|
|
f" total: input={total.input_tokens} output={total.output_tokens} reasoning={total.reasoning_output_tokens} total={total.total_tokens} cached={total.cached_input_tokens}"
|
|
)
|
|
|
|
|
|
async def main() -> None:
    """Run the interactive loop: read a prompt, stream the reply, report status and usage."""
    print("Codex async mini CLI. Type /exit to quit.")

    async with AsyncCodex(config=runtime_config()) as codex:
        thread = await codex.thread_start(
            model="gpt-5.4", config={"model_reasoning_effort": "high"}
        )
        print("Thread:", thread.id)

        while True:
            # input() blocks, so run it off the event loop in a worker thread.
            try:
                prompt = (await asyncio.to_thread(input, "you> ")).strip()
            except EOFError:
                break
            if not prompt:
                continue
            if prompt == "/exit" or prompt == "/quit":
                break

            turn = await thread.turn(TextInput(prompt))
            usage = status = error = None
            streamed_any = False

            print("assistant> ", end="", flush=True)
            async for event in turn.stream():
                payload = event.payload
                if event.method == "item/agentMessage/delta":
                    # Echo text deltas to the terminal as they arrive.
                    if chunk := getattr(payload, "delta", ""):
                        print(chunk, end="", flush=True)
                        streamed_any = True
                elif isinstance(payload, ThreadTokenUsageUpdatedNotification):
                    usage = payload.token_usage
                elif isinstance(payload, TurnCompletedNotification):
                    status = payload.turn.status
                    error = payload.turn.error

            if streamed_any:
                print()
            else:
                print("[no text]")

            status_text = _status_value(status)
            print(f"assistant.status> {status_text}")
            if status_text == "failed":
                print("assistant.error>", error)

            print(_format_usage(usage))
# Script entry point: drive the async CLI on asyncio's default event loop.
if __name__ == "__main__":
    asyncio.run(main())