mirror of
https://github.com/openai/codex.git
synced 2026-04-25 07:05:38 +00:00
Remove the SDK alias/result layers so the wrapper surface returns canonical generated app-server models directly. - delete public type alias modules and regenerate v2_all.py against current schema - return InitializeResponse from metadata and generated Turn from run() - update docs, examples, notebook, and tests to use canonical generated models and repo-only text extraction helpers Validation: - PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests - GH_TOKEN="[REDACTED — never commit credentials; rotate this token]" RUN_REAL_CODEX_TESTS=1 PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests -rs Co-authored-by: Codex <noreply@openai.com>
35 lines
1.1 KiB
Python
35 lines
1.1 KiB
Python
import sys
|
|
from pathlib import Path
|
|
|
|
_EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
|
|
if str(_EXAMPLES_ROOT) not in sys.path:
|
|
sys.path.insert(0, str(_EXAMPLES_ROOT))
|
|
|
|
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
|
|
|
|
ensure_local_sdk_src()
|
|
|
|
import asyncio
|
|
|
|
from codex_app_server import AsyncCodex, TextInput
|
|
|
|
|
|
async def main() -> None:
    """Start a thread, run one turn, then resume the thread and run another.

    Demonstrates that a resumed thread continues the original conversation:
    the second turn's assistant text is read back from the persisted thread
    rather than from the in-memory turn result.
    """
    async with AsyncCodex(config=runtime_config()) as codex:
        # Create a fresh thread with high reasoning effort.
        thread = await codex.thread_start(
            model="gpt-5.4",
            config={"model_reasoning_effort": "high"},
        )

        # Run an opening turn; its result is not needed, only the side effect
        # of populating the thread's history.
        opening = await thread.turn(TextInput("Tell me one fact about Saturn."))
        await opening.run()
        print("Created thread:", thread.id)

        # Reattach to the same thread by id and continue the conversation.
        continuation = await codex.thread_resume(thread.id)
        follow_up = await continuation.turn(TextInput("Continue with one more fact."))
        follow_up_result = await follow_up.run()

        # Read the persisted thread (turns included) and print the assistant
        # text of the turn we just ran, proving it was stored server-side.
        snapshot = await continuation.read(include_turns=True)
        stored_turn = find_turn_by_id(snapshot.thread.turns, follow_up_result.id)
        print(assistant_text_from_turn(stored_turn))
|
|
# Script entry point: drive the async example via the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|