## TL;DR

WIP, especially the examples.

Thin the Python SDK public surface so the wrapper layer returns the canonical app-server generated models directly:

- keeps `Codex` / `AsyncCodex` / `Thread` / `Turn` and the input helpers, but removes alias-only type layers and custom result models
- `metadata` now returns `InitializeResponse` and `run()` returns the generated app-server `Turn` (see the sketch after this summary)
- updates docs, examples, the notebook, and tests to use the canonical generated types, and regenerates `v2_all.py` against the current schema
- keeps the pinned runtime-package integration flow and the real integration coverage

## Validation

- `PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests`
- `GH_TOKEN="$(gh auth token)" RUN_REAL_CODEX_TESTS=1 PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests -rs`

---------

Co-authored-by: Codex <noreply@openai.com>
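
For illustration, a hedged sketch of the thinned surface in use. Only `Codex`, `Thread`, `Turn`, `run()`, and `metadata` are named in the summary; the import path, the `start_thread()` helper, and the printed type names are assumptions made for the example, not confirmed API.

```python
# Hedged usage sketch; the import path and start_thread() are assumptions.
from codex import Codex

client = Codex()

# `metadata` now hands back the generated `InitializeResponse` directly,
# with no alias-only wrapper type in between.
init = client.metadata
print(type(init).__name__)  # InitializeResponse

# `run()` returns the generated app-server `Turn` model rather than a
# custom result type.
thread = client.start_thread()  # hypothetical helper name
turn = thread.run("Summarize this repository")
print(type(turn).__name__)  # Turn
```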
65 lines · 2.0 KiB · Python
from __future__ import annotations

import asyncio
import time

from codex_app_server.async_client import AsyncAppServerClient


def test_async_client_serializes_transport_calls() -> None:
    async def scenario() -> int:
        client = AsyncAppServerClient()
        active = 0
        max_active = 0

        def fake_model_list(include_hidden: bool = False) -> bool:
            # Track how many transport calls are in flight at once.
            nonlocal active, max_active
            active += 1
            max_active = max(max_active, active)
            time.sleep(0.05)
            active -= 1
            return include_hidden

        client._sync.model_list = fake_model_list  # type: ignore[method-assign]
        await asyncio.gather(client.model_list(), client.model_list())
        return max_active

    # Two concurrent calls must be serialized onto the sync transport.
    assert asyncio.run(scenario()) == 1
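
# --- Editorial sketch (not part of the real SDK) ---------------------------
# The test above pins down observable behavior only: concurrent
# model_list() calls never overlap on the sync transport. One plausible
# way to get that behavior, assuming the async wrapper delegates each
# call to its synchronous client on a worker thread, is a single
# asyncio.Lock guarding every delegated call. A hedged sketch, not
# AsyncAppServerClient's actual implementation:
class _SerializedWrapperSketch:
    def __init__(self, sync_client) -> None:
        self._sync = sync_client
        self._lock = asyncio.Lock()

    async def model_list(self, include_hidden: bool = False):
        # Holding the lock across the worker-thread call means at most
        # one transport call is ever in flight, i.e. max_active == 1.
        async with self._lock:
            return await asyncio.to_thread(self._sync.model_list, include_hidden)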


def test_async_stream_text_is_incremental_and_blocks_parallel_calls() -> None:
    async def scenario() -> tuple[str, list[str], bool]:
        client = AsyncAppServerClient()

        def fake_stream_text(thread_id: str, text: str, params=None):  # type: ignore[no-untyped-def]
            yield "first"
            time.sleep(0.03)
            yield "second"
            yield "third"

        def fake_model_list(include_hidden: bool = False) -> str:
            return "done"

        client._sync.stream_text = fake_stream_text  # type: ignore[method-assign]
        client._sync.model_list = fake_model_list  # type: ignore[method-assign]

        stream = client.stream_text("thread-1", "hello")
        first = await anext(stream)

        # A call issued mid-stream must not complete until the stream
        # has been fully consumed.
        competing_call = asyncio.create_task(client.model_list())
        await asyncio.sleep(0.01)
        blocked_before_stream_done = not competing_call.done()

        remaining: list[str] = []
        async for item in stream:
            remaining.append(item)

        await competing_call
        return first, remaining, blocked_before_stream_done

    first, remaining, blocked = asyncio.run(scenario())
    assert first == "first"
    assert remaining == ["second", "third"]
    assert blocked
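
The second test implies more than call serialization: whatever guards the transport must stay held for the full lifetime of a stream, since chunks arrive incrementally while the competing `model_list()` call remains blocked until the stream is exhausted. Continuing the hypothetical `_SerializedWrapperSketch` above (an assumption, not the real implementation), one way to bridge the sync generator into an async iterator with those semantics:

```python
import asyncio


class _StreamingWrapperSketch:
    """Hypothetical continuation of the sketch above; illustration only."""

    def __init__(self, sync_client) -> None:
        self._sync = sync_client
        self._lock = asyncio.Lock()

    async def stream_text(self, thread_id: str, text: str, params=None):
        # The lock stays held until the sync generator is exhausted, so a
        # call issued mid-stream blocks until the stream finishes, which
        # matches blocked_before_stream_done in the test above.
        async with self._lock:
            chunks = self._sync.stream_text(thread_id, text, params)
            sentinel = object()
            while True:
                # Pull each chunk on a worker thread so a slow producer
                # never stalls the event loop; chunks are yielded as soon
                # as they arrive, keeping consumption incremental.
                chunk = await asyncio.to_thread(next, chunks, sentinel)
                if chunk is sentinel:
                    return
                yield chunk
```

One caveat of this shape: if a caller abandons the stream early, the lock is released only when the async generator is finalized, so a real implementation would likely need explicit close handling.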