mirror of
https://github.com/openai/codex.git
synced 2026-04-27 16:15:09 +00:00
## TL;DR Add `thread.run(...)` / `async thread.run(...)` convenience methods to the Python SDK for the common case. - add `RunInput = Input | str` and `RunResult` with `final_response`, collected `items`, and optional `usage` - keep `thread.turn(...)` strict and lower-level for streaming, steering, interrupting, and raw generated `Turn` access - update Python SDK docs, quickstart examples, and tests for the sync and async convenience flows ## Validation - `python3 -m pytest sdk/python/tests/test_public_api_signatures.py sdk/python/tests/test_public_api_runtime_behavior.py` - `python3 -m pytest sdk/python/tests/test_real_app_server_integration.py -k 'thread_run_convenience or async_thread_run_convenience'` (skipped in this environment) --------- Co-authored-by: Codex <noreply@openai.com>
25 lines
652 B
Python
25 lines
652 B
Python
"""Minimal Codex SDK example: start a thread and run one turn with `thread.run`."""

import sys
from pathlib import Path

# Make the examples root importable so `_bootstrap` resolves when this
# script is executed directly rather than as part of a package.
_examples_root = str(Path(__file__).resolve().parents[1])
if _examples_root not in sys.path:
    sys.path.insert(0, _examples_root)

from _bootstrap import ensure_local_sdk_src, runtime_config, server_label

# Point imports at the local SDK source tree before importing it.
ensure_local_sdk_src()

from codex_app_server import Codex

# The context manager handles client startup/shutdown around the turn.
with Codex(config=runtime_config()) as codex:
    print("Server:", server_label(codex.metadata))

    thread = codex.thread_start(
        model="gpt-5.4",
        config={"model_reasoning_effort": "high"},
    )
    # `run` is the convenience wrapper: it collects items and the final text.
    result = thread.run("Say hello in one sentence.")
    print("Items:", len(result.items))
    print("Text:", result.final_response)