Mirror of https://github.com/openai/codex.git
# Add Python SDK thread.run convenience methods (#15088)
## TL;DR

Add `thread.run(...)` / `async thread.run(...)` convenience methods to the Python SDK for the common case.

- add `RunInput = Input | str` and `RunResult` with `final_response`, collected `items`, and optional `usage`
- keep `thread.turn(...)` strict and lower-level for streaming, steering, interrupting, and raw generated `Turn` access
- update Python SDK docs, quickstart examples, and tests for the sync and async convenience flows

## Validation

- `python3 -m pytest sdk/python/tests/test_public_api_signatures.py sdk/python/tests/test_public_api_runtime_behavior.py`
- `python3 -m pytest sdk/python/tests/test_real_app_server_integration.py -k 'thread_run_convenience or async_thread_run_convenience'` (skipped in this environment)

---------

Co-authored-by: Codex <noreply@openai.com>
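Concretely, the common-case flow exercised by the new integration tests below (in `sdk/python/tests/test_real_app_server_integration.py`, the file targeted by the second validation command) looks like this when pulled out of the test harness and written as a standalone script. The model and reasoning-effort values are simply what the tests use, not requirements of `thread.run(...)`:

```python
# Standalone version of the sync convenience flow from
# test_real_thread_run_convenience_smoke below. Model/config values are
# just what the test happens to use.
import json

from codex_app_server import Codex

with Codex() as codex:
    thread = codex.thread_start(
        model="gpt-5.4",
        config={"model_reasoning_effort": "high"},
    )
    result = thread.run("say ok")  # RunInput: a plain string covers the common case
    print(json.dumps({
        "thread_id": thread.id,
        "final_response": result.final_response,  # final assistant text
        "items_count": len(result.items),         # items collected during the turn
        "has_usage": result.usage is not None,    # usage is optional
    }))
```

The async flow in the second test is identical apart from `AsyncCodex`, `async with`, and `await` on `thread_start` and `run`.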
```diff
@@ -265,6 +265,36 @@ def test_real_thread_and_turn_start_smoke(runtime_env: PreparedRuntimeEnv) -> None:
     assert isinstance(data["persisted_items_count"], int)
 
 
+def test_real_thread_run_convenience_smoke(runtime_env: PreparedRuntimeEnv) -> None:
+    data = _run_json_python(
+        runtime_env,
+        textwrap.dedent(
+            """
+            import json
+            from codex_app_server import Codex
+
+            with Codex() as codex:
+                thread = codex.thread_start(
+                    model="gpt-5.4",
+                    config={"model_reasoning_effort": "high"},
+                )
+                result = thread.run("say ok")
+                print(json.dumps({
+                    "thread_id": thread.id,
+                    "final_response": result.final_response,
+                    "items_count": len(result.items),
+                    "has_usage": result.usage is not None,
+                }))
+            """
+        ),
+    )
+
+    assert isinstance(data["thread_id"], str) and data["thread_id"].strip()
+    assert isinstance(data["final_response"], str) and data["final_response"].strip()
+    assert isinstance(data["items_count"], int)
+    assert isinstance(data["has_usage"], bool)
+
+
 def test_real_async_thread_turn_usage_and_ids_smoke(
     runtime_env: PreparedRuntimeEnv,
 ) -> None:
```
```diff
@@ -308,6 +338,42 @@ def test_real_async_thread_turn_usage_and_ids_smoke(
     assert isinstance(data["persisted_items_count"], int)
 
 
+def test_real_async_thread_run_convenience_smoke(
+    runtime_env: PreparedRuntimeEnv,
+) -> None:
+    data = _run_json_python(
+        runtime_env,
+        textwrap.dedent(
+            """
+            import asyncio
+            import json
+            from codex_app_server import AsyncCodex
+
+            async def main():
+                async with AsyncCodex() as codex:
+                    thread = await codex.thread_start(
+                        model="gpt-5.4",
+                        config={"model_reasoning_effort": "high"},
+                    )
+                    result = await thread.run("say ok")
+                    print(json.dumps({
+                        "thread_id": thread.id,
+                        "final_response": result.final_response,
+                        "items_count": len(result.items),
+                        "has_usage": result.usage is not None,
+                    }))
+
+            asyncio.run(main())
+            """
+        ),
+    )
+
+    assert isinstance(data["thread_id"], str) and data["thread_id"].strip()
+    assert isinstance(data["final_response"], str) and data["final_response"].strip()
+    assert isinstance(data["items_count"], int)
+    assert isinstance(data["has_usage"], bool)
+
+
 def test_notebook_bootstrap_resolves_sdk_and_runtime_from_unrelated_cwd(
     runtime_env: PreparedRuntimeEnv,
 ) -> None:
```
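The tests only assert that `final_response` is a non-empty string, that `items` supports `len(...)`, and that `usage` may or may not be present. Read together with the TL;DR bullets, the object returned by `thread.run(...)` can be pictured roughly as the dataclass below. This is an orientation sketch under those assumptions, not the SDK's actual definition; the `ThreadItem` and `Usage` placeholder names are invented here:

```python
# Sketch only: field names come from the TL;DR and the assertions above;
# ThreadItem and Usage are placeholder names, not the SDK's real types.
from dataclasses import dataclass, field
from typing import Any, Optional

ThreadItem = Any
Usage = Any

# Per the TL;DR, run() accepts either a structured Input or a plain prompt string.
RunInput = Any  # stands in for `Input | str`


@dataclass
class RunResult:
    """Aggregate outcome of thread.run(...): one turn driven to completion."""
    final_response: str                                      # final assistant text
    items: list[ThreadItem] = field(default_factory=list)    # items collected during the turn
    usage: Optional[Usage] = None                            # token usage, when reported
```

Keeping `thread.turn(...)` as the strict, lower-level entry point means callers who need streaming, steering, or interruption still get the raw `Turn`, while `run(...)` collapses the common case into one call and one aggregated result.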