[8/8] Add Python SDK Ruff formatting (#22021)

## Why

The Python SDK needs the same tight format/lint loop as the rest of
the repo: a safe Ruff autofix pass, Ruff formatting, format-on-save
editor behavior, and CI checks that catch drift. Without that loop, SDK
changes can land with formatting or import ordering that differs from
what reviewers and CI expect.
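
Concretely, the loop is one local command plus two CI gates. A sketch of
the workflow, using the same invocations the diffs below wire up (CI runs
them through a pinned uv install from the SDK directory):

```sh
# Local: format Rust and the Python SDK in one pass.
just fmt

# CI: fail on lint findings or formatting drift before tests run.
uv run --extra dev ruff check --output-format=github .
uv run --extra dev ruff format --check .
uv run --extra dev pytest
```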

## What

- Add Ruff configuration to `sdk/python/pyproject.toml`, excluding
generated protocol code and notebooks from the normal lint/format pass.
- Update `just fmt` so it still formats Rust and also runs the Python SDK
Ruff autofix and format passes (see the sketch after this list).
- Add Python SDK CI steps for `ruff check` and `ruff format --check`
before pytest.
- Recommend the Ruff VS Code extension and enable Python
format/fix/organize-on-save so Cmd+S uses the same tooling.
- Apply the resulting Ruff formatting to SDK Python files, examples, and
the checked-in generated `v2_all.py` output emitted by the pinned
generator.
- Add a guard test for the `just fmt` recipe so it keeps working from
both Rust and Python SDK working directories.
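
For reference, the recipe drives Ruff through uv against the SDK project.
The equivalent direct invocations, run from `sdk/python`, look roughly like
this (a sketch; the checked-in recipe runs the same commands from `codex-rs`
via `--project ../sdk/python`, as the justfile diff below shows):

```sh
# Apply Ruff's autofixes without reporting remaining violations,
# then run the formatter over the SDK tree.
uv run --extra dev ruff check --fix --fix-only .
uv run --extra dev ruff format .
```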

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. #22014 `[7/8]` Add Python SDK app-server integration harness
8. This PR `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the
shared format recipe.
- Ran `just fmt` after the recipe update.
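
To reproduce locally (a sketch; the `-k` selector avoids hard-coding the
test file path):

```sh
# From sdk/python: run just the new guard test, then the full recipe.
uv run --extra dev pytest -k test_root_fmt_recipe_formats_rust_and_python_sdk
just fmt  # the recipe runs from codex-rs via the justfile's working-directory setting
```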

---------

Co-authored-by: Codex <noreply@openai.com>
Commit aa9e8f0262 (parent 3e10e09e24), authored by Ahmed Ibrahim,
committed by GitHub, 2026-05-12 01:10:29 +03:00.
51 changed files with 660 additions and 1163 deletions.

View File

@@ -36,6 +36,8 @@ jobs:
python -m venv /tmp/uv
/tmp/uv/bin/python -m pip install uv==0.11.3
/tmp/uv/bin/uv sync --extra dev --frozen
/tmp/uv/bin/uv run --extra dev ruff check --output-format=github .
/tmp/uv/bin/uv run --extra dev ruff format --check .
/tmp/uv/bin/uv run --extra dev pytest
'

View File

@@ -1,6 +1,7 @@
{
"recommendations": [
"rust-lang.rust-analyzer",
"charliermarsh.ruff",
"tamasfe.even-better-toml",
"vadimcn.vscode-lldb",

View File

@@ -12,6 +12,14 @@
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
},
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.fixAll.ruff": "explicit",
"source.organizeImports.ruff": "explicit",
},
},
// Array order for options in ~/.codex/config.toml such as `notify` and the
// `args` for an MCP server is significant, so we disable reordering.
"evenBetterToml.formatter.reorderArrays": false,

View File

@@ -30,9 +30,11 @@ app-server-test-client *args:
cargo build -p codex-cli
cargo run -p codex-app-server-test-client -- --codex-bin ./target/debug/codex "$@"
# format code
# Format Rust and Python SDK code.
fmt:
cargo fmt -- --config imports_granularity=Item 2>/dev/null
uv run --project ../sdk/python --extra dev ruff check --fix --fix-only ../sdk/python
uv run --project ../sdk/python --extra dev ruff format ../sdk/python
fix *args:
cargo clippy --fix --tests --allow-dirty "$@"

View File

@@ -197,15 +197,9 @@ def _download_release_archive(version: str, temp_root: Path) -> Path:
metadata = _release_metadata(version)
assets = metadata.get("assets")
if not isinstance(assets, list):
raise RuntimeSetupError(
f"Release {release_tag} returned malformed assets metadata."
)
raise RuntimeSetupError(f"Release {release_tag} returned malformed assets metadata.")
asset = next(
(
item
for item in assets
if isinstance(item, dict) and item.get("name") == asset_name
),
(item for item in assets if isinstance(item, dict) and item.get("name") == asset_name),
None,
)
if asset is None:
@@ -279,9 +273,7 @@ def _extract_runtime_binary(archive_path: Path, temp_root: Path) -> Path:
with zipfile.ZipFile(archive_path) as zip_file:
zip_file.extractall(extract_dir)
else:
raise RuntimeSetupError(
f"Unsupported release archive format: {archive_path.name}"
)
raise RuntimeSetupError(f"Unsupported release archive format: {archive_path.name}")
binary_name = runtime_binary_name()
archive_stem = archive_path.name.removesuffix(".tar.gz").removesuffix(".zip")
@@ -290,9 +282,7 @@ def _extract_runtime_binary(archive_path: Path, temp_root: Path) -> Path:
for path in extract_dir.rglob("*")
if path.is_file()
and (
path.name == binary_name
or path.name == archive_stem
or path.name.startswith("codex-")
path.name == binary_name or path.name == archive_stem or path.name.startswith("codex-")
)
]
if not candidates:

View File

@@ -22,7 +22,9 @@ async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
print("Server:", server_label(codex.metadata))
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
result = await thread.run("Say hello in one sentence.")
print("Items:", len(result.items))
print("Text:", result.final_response)

View File

@@ -21,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
turn = await thread.turn(TextInput("Give 3 bullets about SIMD."))
result = await turn.run()
persisted = await thread.read(include_turns=True)

View File

@@ -21,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
turn = await thread.turn(TextInput("Explain SIMD in 3 short bullets."))
event_count = 0
@@ -44,7 +46,9 @@ async def main() -> None:
saw_delta = True
continue
if event.method == "turn/completed":
completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
if saw_delta:
print()

View File

@@ -40,7 +40,9 @@ with Codex(config=runtime_config()) as codex:
saw_delta = True
continue
if event.method == "turn/completed":
completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
if saw_delta:
print()

View File

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
sys.path.insert(0, str(_EXAMPLES_ROOT))
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
from _bootstrap import (
assistant_text_from_turn,
ensure_local_sdk_src,
find_turn_by_id,
runtime_config,
)
ensure_local_sdk_src()
@@ -16,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
original = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
original = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
first_turn = await original.turn(TextInput("Tell me one fact about Saturn."))
_ = await first_turn.run()

View File

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
sys.path.insert(0, str(_EXAMPLES_ROOT))
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
from _bootstrap import (
assistant_text_from_turn,
ensure_local_sdk_src,
find_turn_by_id,
runtime_config,
)
ensure_local_sdk_src()

View File

@@ -16,8 +16,12 @@ from openai_codex import AsyncCodex, TextInput
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
first = await (await thread.turn(TextInput("One sentence about structured planning."))).run()
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
first = await (
await thread.turn(TextInput("One sentence about structured planning."))
).run()
second = await (await thread.turn(TextInput("Now restate it for a junior engineer."))).run()
reopened = await codex.thread_resume(thread.id)
@@ -36,7 +40,9 @@ async def main() -> None:
model="gpt-5.4",
config={"model_reasoning_effort": "high"},
)
resumed_result = await (await resumed.turn(TextInput("Continue in one short sentence."))).run()
resumed_result = await (
await resumed.turn(TextInput("Continue in one short sentence."))
).run()
resumed_info = f"{resumed_result.id} {resumed_result.status}"
except Exception as exc:
resumed_info = f"skipped({type(exc).__name__})"
@@ -44,7 +50,9 @@ async def main() -> None:
forked_info = "n/a"
try:
forked = await codex.thread_fork(unarchived.id, model="gpt-5.4")
forked_result = await (await forked.turn(TextInput("Take a different angle in one short sentence."))).run()
forked_result = await (
await forked.turn(TextInput("Take a different angle in one short sentence."))
).run()
forked_info = f"{forked_result.id} {forked_result.status}"
except Exception as exc:
forked_info = f"skipped({type(exc).__name__})"

View File

@@ -11,7 +11,6 @@ ensure_local_sdk_src()
from openai_codex import Codex, TextInput
with Codex(config=runtime_config()) as codex:
thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
first = thread.turn(TextInput("One sentence about structured planning.")).run()
@@ -41,7 +40,9 @@ with Codex(config=runtime_config()) as codex:
forked_info = "n/a"
try:
forked = codex.thread_fork(unarchived.id, model="gpt-5.4")
forked_result = forked.turn(TextInput("Take a different angle in one short sentence.")).run()
forked_result = forked.turn(
TextInput("Take a different angle in one short sentence.")
).run()
forked_info = f"{forked_result.id} {forked_result.status}"
except Exception as exc:
forked_info = f"skipped({type(exc).__name__})"

View File

@@ -23,7 +23,9 @@ REMOTE_IMAGE_URL = "https://raw.githubusercontent.com/github/explore/main/topics
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
turn = await thread.turn(
[
TextInput("What is in this image? Give 3 bullets."),

View File

@@ -23,11 +23,15 @@ from openai_codex import AsyncCodex, LocalImageInput, TextInput
async def main() -> None:
with temporary_sample_image_path() as image_path:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
turn = await thread.turn(
[
TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."),
TextInput(
"Read this generated local image and summarize the colors/layout in 2 bullets."
),
LocalImageInput(str(image_path.resolve())),
]
)

View File

@@ -23,7 +23,9 @@ with temporary_sample_image_path() as image_path:
result = thread.turn(
[
TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."),
TextInput(
"Read this generated local image and summarize the colors/layout in 2 bullets."
),
LocalImageInput(str(image_path.resolve())),
]
).run()

View File

@@ -60,7 +60,9 @@ async def retry_on_overload_async(
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
try:
result = await retry_on_overload_async(

View File

@@ -45,7 +45,9 @@ async def main() -> None:
print("Codex async mini CLI. Type /exit to quit.")
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
print("Thread:", thread.id)
while True:

View File

@@ -49,7 +49,9 @@ PROMPT = (
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
turn = await thread.turn(
TextInput(PROMPT),
@@ -64,12 +66,16 @@ async def main() -> None:
try:
structured = json.loads(structured_text)
except json.JSONDecodeError as exc:
raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc
raise RuntimeError(
f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
) from exc
summary = structured.get("summary")
actions = structured.get("actions")
if not isinstance(summary, str) or not isinstance(actions, list) or not all(
isinstance(action, str) for action in actions
if (
not isinstance(summary, str)
or not isinstance(actions, list)
or not all(isinstance(action, str) for action in actions)
):
raise RuntimeError(
f"Expected structured output with string summary/actions, got: {structured!r}"

View File

@@ -60,14 +60,20 @@ with Codex(config=runtime_config()) as codex:
try:
structured = json.loads(structured_text)
except json.JSONDecodeError as exc:
raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc
raise RuntimeError(
f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
) from exc
summary = structured.get("summary")
actions = structured.get("actions")
if not isinstance(summary, str) or not isinstance(actions, list) or not all(
isinstance(action, str) for action in actions
if (
not isinstance(summary, str)
or not isinstance(actions, list)
or not all(isinstance(action, str) for action in actions)
):
raise RuntimeError(f"Expected structured output with string summary/actions, got: {structured!r}")
raise RuntimeError(
f"Expected structured output with string summary/actions, got: {structured!r}"
)
print("Status:", result.status)
print("summary:", summary)

View File

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
sys.path.insert(0, str(_EXAMPLES_ROOT))
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
from _bootstrap import (
assistant_text_from_turn,
ensure_local_sdk_src,
find_turn_by_id,
runtime_config,
)
ensure_local_sdk_src()
@@ -35,7 +40,9 @@ PREFERRED_MODEL = "gpt-5.4"
def _pick_highest_model(models):
visible = [m for m in models if not m.hidden] or models
preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
preferred = next(
(m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None
)
if preferred is not None:
return preferred
known_names = {m.id for m in visible} | {m.model for m in visible}
@@ -100,7 +107,9 @@ async def main() -> None:
first_persisted_turn = find_turn_by_id(persisted.thread.turns, first.id)
print("agent.message:", assistant_text_from_turn(first_persisted_turn))
print("items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or []))
print(
"items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or [])
)
second_turn = await thread.turn(
TextInput("Return JSON for a safe feature-flag rollout plan."),
@@ -117,7 +126,10 @@ async def main() -> None:
second_persisted_turn = find_turn_by_id(persisted.thread.turns, second.id)
print("agent.message.params:", assistant_text_from_turn(second_persisted_turn))
print("items.params:", 0 if second_persisted_turn is None else len(second_persisted_turn.items or []))
print(
"items.params:",
0 if second_persisted_turn is None else len(second_persisted_turn.items or []),
)
if __name__ == "__main__":

View File

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
sys.path.insert(0, str(_EXAMPLES_ROOT))
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
from _bootstrap import (
assistant_text_from_turn,
ensure_local_sdk_src,
find_turn_by_id,
runtime_config,
)
ensure_local_sdk_src()
@@ -33,7 +38,9 @@ PREFERRED_MODEL = "gpt-5.4"
def _pick_highest_model(models):
visible = [m for m in models if not m.hidden] or models
preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
preferred = next(
(m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None
)
if preferred is not None:
return preferred
known_names = {m.id for m in visible} | {m.model for m in visible}

View File

@@ -20,8 +20,12 @@ from openai_codex import AsyncCodex, TextInput
async def main() -> None:
async with AsyncCodex(config=runtime_config()) as codex:
thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
steer_turn = await thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence."))
thread = await codex.thread_start(
model="gpt-5.4", config={"model_reasoning_effort": "high"}
)
steer_turn = await thread.turn(
TextInput("Count from 1 to 40 with commas, then one summary sentence.")
)
steer_result = "sent"
try:
_ = await steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers."))
@@ -35,11 +39,17 @@ async def main() -> None:
steer_event_count += 1
if event.method == "turn/completed":
steer_completed_turn = event.payload.turn
steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
steer_completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
steer_preview = (
assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
)
interrupt_turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence."))
interrupt_turn = await thread.turn(
TextInput("Count from 1 to 200 with commas, then one summary sentence.")
)
interrupt_result = "sent"
try:
_ = await interrupt_turn.interrupt()
@@ -53,9 +63,13 @@ async def main() -> None:
interrupt_event_count += 1
if event.method == "turn/completed":
interrupt_completed_turn = event.payload.turn
interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
interrupt_completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
interrupt_preview = (
assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
)
print("steer.result:", steer_result)
print("steer.final.status:", steer_completed_status)

View File

@@ -17,7 +17,9 @@ from openai_codex import Codex, TextInput
with Codex(config=runtime_config()) as codex:
thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
steer_turn = thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence."))
steer_turn = thread.turn(
TextInput("Count from 1 to 40 with commas, then one summary sentence.")
)
steer_result = "sent"
try:
_ = steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers."))
@@ -31,11 +33,15 @@ with Codex(config=runtime_config()) as codex:
steer_event_count += 1
if event.method == "turn/completed":
steer_completed_turn = event.payload.turn
steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
steer_completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
interrupt_turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence."))
interrupt_turn = thread.turn(
TextInput("Count from 1 to 200 with commas, then one summary sentence.")
)
interrupt_result = "sent"
try:
_ = interrupt_turn.interrupt()
@@ -49,9 +55,13 @@ with Codex(config=runtime_config()) as codex:
interrupt_event_count += 1
if event.method == "turn/completed":
interrupt_completed_turn = event.payload.turn
interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
interrupt_completed_status = getattr(
event.payload.turn.status, "value", str(event.payload.turn.status)
)
interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
interrupt_preview = (
assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
)
print("steer.result:", steer_result)
print("steer.final.status:", steer_completed_status)

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
import contextlib
import importlib.util
import os
import sys
import tempfile
import zlib
@@ -107,11 +106,15 @@ def temporary_sample_image_path() -> Iterator[Path]:
def server_label(metadata: object) -> str:
server = getattr(metadata, "serverInfo", None)
server_name = ((getattr(server, "name", None) or "") if server is not None else "").strip()
server_version = ((getattr(server, "version", None) or "") if server is not None else "").strip()
server_version = (
(getattr(server, "version", None) or "") if server is not None else ""
).strip()
if server_name and server_version:
return f"{server_name} {server_version}"
user_agent = ((getattr(metadata, "userAgent", None) or "") if metadata is not None else "").strip()
user_agent = (
(getattr(metadata, "userAgent", None) or "") if metadata is not None else ""
).strip()
return user_agent or "unknown"

View File

@@ -30,7 +30,7 @@ Repository = "https://github.com/openai/codex"
Issues = "https://github.com/openai/codex/issues"
[project.optional-dependencies]
dev = ["pytest>=8.0", "datamodel-code-generator==0.31.2", "ruff>=0.11"]
dev = ["pytest>=8.0", "datamodel-code-generator==0.31.2", "ruff>=0.15.8"]
[tool.hatch.build]
exclude = [
@@ -61,6 +61,29 @@ include = [
addopts = "-q"
testpaths = ["tests"]
[tool.ruff]
target-version = "py310"
required-version = ">=0.15.8"
line-length = 100
extend-exclude = [
"notebooks/**",
"src/openai_codex/generated/**",
]
[tool.ruff.lint]
select = ["E", "F", "I", "B", "C4"]
ignore = ["E501"]
preview = true
extend-safe-fixes = ["ALL"]
unfixable = ["F841"]
[tool.ruff.lint.per-file-ignores]
"examples/**/*.py" = ["E402"]
"tests/test_real_app_server_integration.py" = ["E402"]
[tool.ruff.lint.isort]
combine-as-imports = true
[tool.uv]
exclude-newer = "7 days"
exclude-newer-package = { openai-codex-cli-bin = "2026-05-10T00:00:00Z" }

View File

@@ -88,9 +88,7 @@ def pinned_runtime_version() -> str:
pyproject_text = sdk_pyproject_path().read_text()
match = re.search(r"(?ms)^dependencies = \[(.*?)\]$", pyproject_text)
if match is None:
raise RuntimeError(
"Could not find dependencies array in sdk/python/pyproject.toml"
)
raise RuntimeError("Could not find dependencies array in sdk/python/pyproject.toml")
pins = re.findall(
rf'"{re.escape(RUNTIME_DISTRIBUTION_NAME)}==([^"]+)"',
@@ -126,8 +124,7 @@ def pinned_runtime_codex_path() -> Path:
from codex_cli_bin import bundled_codex_path
except ImportError as exc:
raise RuntimeError(
f"Installed {RUNTIME_DISTRIBUTION_NAME} package does not expose "
"bundled_codex_path."
f"Installed {RUNTIME_DISTRIBUTION_NAME} package does not expose bundled_codex_path."
) from exc
codex_path = bundled_codex_path()
@@ -148,9 +145,7 @@ def normalize_codex_version(version: str) -> str:
normalized = re.sub(r"-rc\.?([0-9]+)$", r"rc\1", normalized)
if not re.fullmatch(r"[0-9]+(?:\.[0-9]+)*(?:(?:a|b|rc)[0-9]+)?", normalized):
raise RuntimeError(
f"Could not normalize Codex version {version!r} to a PEP 440 version"
)
raise RuntimeError(f"Could not normalize Codex version {version!r} to a PEP 440 version")
return normalized
@@ -231,9 +226,7 @@ def _rewrite_project_name(pyproject_text: str, name: str) -> str:
def _rewrite_sdk_runtime_dependency(pyproject_text: str, runtime_version: str) -> str:
match = re.search(r"^dependencies = \[(.*?)\]$", pyproject_text, flags=re.MULTILINE)
if match is None:
raise RuntimeError(
"Could not find dependencies array in sdk/python/pyproject.toml"
)
raise RuntimeError("Could not find dependencies array in sdk/python/pyproject.toml")
raw_items = [item.strip() for item in match.group(1).split(",") if item.strip()]
raw_items = [
@@ -285,9 +278,7 @@ def stage_python_runtime_package(
out_bin.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(binary_path, out_bin)
if not _is_windows():
out_bin.chmod(
out_bin.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
out_bin.chmod(out_bin.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
for resource_binary in resource_binaries:
# Some release targets need helper executables beside the main binary
# (for example Linux bwrap or Windows sandbox helpers). Keep this
@@ -361,11 +352,7 @@ def _enum_literals(value: Any) -> list[str] | None:
if not isinstance(value, dict):
return None
enum = value.get("enum")
if (
not isinstance(enum, list)
or not enum
or not all(isinstance(item, str) for item in enum)
):
if not isinstance(enum, list) or not enum or not all(isinstance(item, str) for item in enum):
return None
return list(enum)
@@ -403,11 +390,7 @@ def _variant_definition_name(base: str, variant: dict[str, Any]) -> str | None:
return f"{_to_pascal_case(pascal or key)}{base}"
required = variant.get("required")
if (
isinstance(required, list)
and len(required) == 1
and isinstance(required[0], str)
):
if isinstance(required, list) and len(required) == 1 and isinstance(required[0], str):
return f"{_to_pascal_case(required[0])}{base}"
enum_literals = _enum_literals(variant)
@@ -419,9 +402,7 @@ def _variant_definition_name(base: str, variant: dict[str, Any]) -> str | None:
return None
def _variant_collision_key(
base: str, variant: dict[str, Any], generated_name: str
) -> str:
def _variant_collision_key(base: str, variant: dict[str, Any], generated_name: str) -> str:
parts = [f"base={base}", f"generated={generated_name}"]
props = variant.get("properties")
if isinstance(props, dict):
@@ -433,11 +414,7 @@ def _variant_collision_key(
parts.append(f"only_property={next(iter(props))}")
required = variant.get("required")
if (
isinstance(required, list)
and len(required) == 1
and isinstance(required[0], str)
):
if isinstance(required, list) and len(required) == 1 and isinstance(required[0], str):
parts.append(f"required_only={required[0]}")
enum_literals = _enum_literals(variant)
@@ -619,13 +596,9 @@ def generate_v2_all(schema_dir: Path) -> None:
def _notification_specs(schema_dir: Path) -> list[tuple[str, str]]:
"""Map each server notification method to its generated payload model class."""
server_notifications = json.loads(
(schema_dir / "ServerNotification.json").read_text()
)
server_notifications = json.loads((schema_dir / "ServerNotification.json").read_text())
one_of = server_notifications.get("oneOf", [])
generated_source = (
sdk_root() / "src" / "openai_codex" / "generated" / "v2_all.py"
).read_text()
generated_source = (sdk_root() / "src" / "openai_codex" / "generated" / "v2_all.py").read_text()
specs: list[tuple[str, str]] = []
@@ -662,9 +635,7 @@ def _notification_turn_id_specs(
specs: list[tuple[str, str]],
) -> tuple[list[str], list[str]]:
"""Classify notification payloads by where their turn id is carried."""
server_notifications = json.loads(
(schema_dir / "ServerNotification.json").read_text()
)
server_notifications = json.loads((schema_dir / "ServerNotification.json").read_text())
definitions = server_notifications.get("definitions", {})
if not isinstance(definitions, dict):
return ([], [])
@@ -699,13 +670,7 @@ def _type_tuple_source(class_names: list[str]) -> str:
def generate_notification_registry(schema_dir: Path) -> None:
"""Regenerate notification dispatch metadata from the runtime notification schema."""
out = (
sdk_root()
/ "src"
/ "openai_codex"
/ "generated"
/ "notification_registry.py"
)
out = sdk_root() / "src" / "openai_codex" / "generated" / "notification_registry.py"
specs = _notification_specs(schema_dir)
class_names = sorted({class_name for _, class_name in specs})
direct_turn_id_types, nested_turn_types = _notification_turn_id_specs(
@@ -787,9 +752,7 @@ class PublicFieldSpec:
class CliOps:
generate_types: Callable[[], None]
stage_python_sdk_package: Callable[[Path, str], Path]
stage_python_runtime_package: Callable[
[Path, str, Path, str | None, Sequence[Path]], Path
]
stage_python_runtime_package: Callable[[Path, str, Path, str | None, Sequence[Path]], Path]
current_sdk_version: Callable[[], str]
@@ -891,14 +854,9 @@ def _approval_mode_override_signature_lines() -> list[str]:
return [" approval_mode: ApprovalMode | None = None,"]
def _approval_mode_assignment_line(
helper_name: str, *, indent: str = " "
) -> str:
def _approval_mode_assignment_line(helper_name: str, *, indent: str = " ") -> str:
"""Return the local mapping from public mode to app-server params."""
return (
f"{indent}approval_policy, approvals_reviewer = "
f"{helper_name}(approval_mode)"
)
return f"{indent}approval_policy, approvals_reviewer = {helper_name}(approval_mode)"
def _approval_mode_model_arg_lines(*, indent: str = " ") -> list[str]:
@@ -909,9 +867,7 @@ def _approval_mode_model_arg_lines(*, indent: str = " ") -> list[str]
]
def _model_arg_lines(
fields: list[PublicFieldSpec], *, indent: str = " "
) -> list[str]:
def _model_arg_lines(fields: list[PublicFieldSpec], *, indent: str = " ") -> list[str]:
return [f"{indent}{field.wire_name}={field.py_name}," for field in fields]
@@ -1224,9 +1180,7 @@ def build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Single SDK maintenance entrypoint")
subparsers = parser.add_subparsers(dest="command", required=True)
subparsers.add_parser(
"generate-types", help="Regenerate Python protocol-derived types"
)
subparsers.add_parser("generate-types", help="Regenerate Python protocol-derived types")
stage_sdk_parser = subparsers.add_parser(
"stage-sdk",
@@ -1324,9 +1278,7 @@ def _resolve_codex_version(args: argparse.Namespace) -> str:
normalized_versions = [normalize_codex_version(version) for version in versions]
if len(set(normalized_versions)) != 1:
raise RuntimeError(
"SDK and runtime package versions must match; pass one --codex-version"
)
raise RuntimeError("SDK and runtime package versions must match; pass one --codex-version")
return normalized_versions[0]

View File

@@ -1,18 +1,4 @@
from .client import AppServerConfig
from .errors import (
AppServerError,
AppServerRpcError,
InternalRpcError,
InvalidParamsError,
InvalidRequestError,
JsonRpcError,
MethodNotFoundError,
ParseError,
RetryLimitExceededError,
ServerBusyError,
TransportClosedError,
is_retryable_error,
)
from ._version import __version__
from .api import (
ApprovalMode,
AsyncCodex,
@@ -30,8 +16,22 @@ from .api import (
Thread,
TurnHandle,
)
from .client import AppServerConfig
from .errors import (
AppServerError,
AppServerRpcError,
InternalRpcError,
InvalidParamsError,
InvalidRequestError,
JsonRpcError,
MethodNotFoundError,
ParseError,
RetryLimitExceededError,
ServerBusyError,
TransportClosedError,
is_retryable_error,
)
from .retry import retry_on_overload
from ._version import __version__
__all__ = [
"__version__",

View File

@@ -122,9 +122,7 @@ class MessageRouter:
if notification.method == "turn/completed":
self._pending_turn_notifications.pop(turn_id, None)
return
self._pending_turn_notifications.setdefault(turn_id, deque()).append(
notification
)
self._pending_turn_notifications.setdefault(turn_id, deque()).append(notification)
return
turn_queue.put(notification)

View File

@@ -1,8 +1,7 @@
from __future__ import annotations
import re
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as distribution_version
from importlib.metadata import PackageNotFoundError, version as distribution_version
from pathlib import Path
DISTRIBUTION_NAME = "openai-codex"

View File

@@ -5,6 +5,23 @@ from dataclasses import dataclass
from enum import Enum
from typing import AsyncIterator, Iterator, NoReturn
from ._inputs import (
ImageInput as ImageInput,
Input,
InputItem as InputItem,
LocalImageInput as LocalImageInput,
MentionInput as MentionInput,
RunInput,
SkillInput as SkillInput,
TextInput as TextInput,
_normalize_run_input,
_to_wire_input,
)
from ._run import (
RunResult,
_collect_async_run_result,
_collect_run_result,
)
from .async_client import AsyncAppServerClient
from .client import AppServerClient, AppServerConfig
from .generated.v2_all import (
@@ -30,8 +47,8 @@ from .generated.v2_all import (
ThreadSortKey,
ThreadSource,
ThreadSourceKind,
ThreadStartSource,
ThreadStartParams,
ThreadStartSource,
Turn as AppServerTurn,
TurnCompletedNotification,
TurnInterruptResponse,
@@ -39,23 +56,6 @@ from .generated.v2_all import (
TurnSteerResponse,
)
from .models import InitializeResponse, JsonObject, Notification, ServerInfo
from ._inputs import (
ImageInput as ImageInput,
Input,
InputItem as InputItem,
LocalImageInput as LocalImageInput,
MentionInput as MentionInput,
RunInput,
SkillInput as SkillInput,
TextInput as TextInput,
_normalize_run_input,
_to_wire_input,
)
from ._run import (
RunResult,
_collect_async_run_result,
_collect_run_result,
)
def _split_user_agent(user_agent: str) -> tuple[str | None, str | None]:
@@ -151,11 +151,7 @@ class Codex:
normalized_server_name = (server_name or "").strip()
normalized_server_version = (server_version or "").strip()
if (
not user_agent
or not normalized_server_name
or not normalized_server_version
):
if not user_agent or not normalized_server_name or not normalized_server_version:
raise RuntimeError(
"initialize response missing required metadata "
f"(user_agent={user_agent!r}, server_name={normalized_server_name!r}, server_version={normalized_server_version!r})"
@@ -262,9 +258,7 @@ class Codex:
sandbox: SandboxMode | None = None,
service_tier: str | None = None,
) -> Thread:
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadResumeParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -298,9 +292,7 @@ class Codex:
service_tier: str | None = None,
thread_source: ThreadSource | None = None,
) -> Thread:
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadForkParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -470,9 +462,7 @@ class AsyncCodex:
service_tier: str | None = None,
) -> AsyncThread:
await self._ensure_initialized()
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadResumeParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -507,9 +497,7 @@ class AsyncCodex:
thread_source: ThreadSource | None = None,
) -> AsyncThread:
await self._ensure_initialized()
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = ThreadForkParams(
thread_id=thread_id,
approval_policy=approval_policy,
@@ -597,9 +585,7 @@ class Thread:
summary: ReasoningSummary | None = None,
) -> TurnHandle:
wire_input = _to_wire_input(input)
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = TurnStartParams(
thread_id=self.id,
input=wire_input,
@@ -683,9 +669,7 @@ class AsyncThread:
) -> AsyncTurnHandle:
await self._codex._ensure_initialized()
wire_input = _to_wire_input(input)
approval_policy, approvals_reviewer = _approval_mode_override_settings(
approval_mode
)
approval_policy, approvals_reviewer = _approval_mode_override_settings(approval_mode)
params = TurnStartParams(
thread_id=self.id,
input=wire_input,
@@ -711,9 +695,7 @@ class AsyncThread:
async def read(self, *, include_turns: bool = False) -> ThreadReadResponse:
await self._codex._ensure_initialized()
return await self._codex._client.thread_read(
self.id, include_turns=include_turns
)
return await self._codex._client.thread_read(self.id, include_turns=include_turns)
async def set_name(self, name: str) -> ThreadSetNameResponse:
await self._codex._ensure_initialized()
@@ -758,10 +740,7 @@ class TurnHandle:
try:
for event in stream:
payload = event.payload
if (
isinstance(payload, TurnCompletedNotification)
and payload.turn.id == self.id
):
if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id:
completed = payload
finally:
stream.close()
@@ -812,10 +791,7 @@ class AsyncTurnHandle:
try:
async for event in stream:
payload = event.payload
if (
isinstance(payload, TurnCompletedNotification)
and payload.turn.id == self.id
):
if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id:
completed = payload
finally:
await stream.aclose()

View File

@@ -127,9 +127,7 @@ class AsyncAppServerClient:
"""List threads using the wrapped sync client."""
return await self._call_sync(self._sync.thread_list, params)
async def thread_read(
self, thread_id: str, include_turns: bool = False
) -> ThreadReadResponse:
async def thread_read(self, thread_id: str, include_turns: bool = False) -> ThreadReadResponse:
"""Read a thread using the wrapped sync client."""
return await self._call_sync(self._sync.thread_read, thread_id, include_turns)
@@ -164,13 +162,9 @@ class AsyncAppServerClient:
params: V2TurnStartParams | JsonObject | None = None,
) -> TurnStartResponse:
"""Start a turn using the wrapped sync client."""
return await self._call_sync(
self._sync.turn_start, thread_id, input_items, params
)
return await self._call_sync(self._sync.turn_start, thread_id, input_items, params)
async def turn_interrupt(
self, thread_id: str, turn_id: str
) -> TurnInterruptResponse:
async def turn_interrupt(self, thread_id: str, turn_id: str) -> TurnInterruptResponse:
"""Interrupt a turn using the wrapped sync client."""
return await self._call_sync(self._sync.turn_interrupt, thread_id, turn_id)

View File

@@ -12,6 +12,8 @@ from typing import Callable, Iterator, TypeVar
from pydantic import BaseModel
from ._message_router import MessageRouter
from ._version import __version__ as SDK_VERSION
from .errors import AppServerError, TransportClosedError
from .generated.notification_registry import NOTIFICATION_MODELS
from .generated.v2_all import (
@@ -43,9 +45,7 @@ from .models import (
Notification,
UnknownNotification,
)
from ._message_router import MessageRouter
from .retry import retry_on_overload
from ._version import __version__ as SDK_VERSION
ModelT = TypeVar("ModelT", bound=BaseModel)
ApprovalHandler = Callable[[str, JsonObject | None], JsonObject]
@@ -76,9 +76,7 @@ def _params_dict(
return dumped
if isinstance(params, dict):
return params
raise TypeError(
f"Expected generated params model or dict, got {type(params).__name__}"
)
raise TypeError(f"Expected generated params model or dict, got {type(params).__name__}")
def _installed_codex_path() -> Path:
@@ -248,9 +246,7 @@ class AppServerClient:
waiter = self._router.create_response_waiter(request_id)
try:
self._write_message(
{"id": request_id, "method": method, "params": params or {}}
)
self._write_message({"id": request_id, "method": method, "params": params or {}})
except BaseException:
self._router.discard_response_waiter(request_id)
raise
@@ -293,20 +289,14 @@ class AppServerClient:
params: V2ThreadResumeParams | JsonObject | None = None,
) -> ThreadResumeResponse:
payload = {"threadId": thread_id, **_params_dict(params)}
return self.request(
"thread/resume", payload, response_model=ThreadResumeResponse
)
return self.request("thread/resume", payload, response_model=ThreadResumeResponse)
def thread_list(
self, params: V2ThreadListParams | JsonObject | None = None
) -> ThreadListResponse:
return self.request(
"thread/list", _params_dict(params), response_model=ThreadListResponse
)
return self.request("thread/list", _params_dict(params), response_model=ThreadListResponse)
def thread_read(
self, thread_id: str, include_turns: bool = False
) -> ThreadReadResponse:
def thread_read(self, thread_id: str, include_turns: bool = False) -> ThreadReadResponse:
return self.request(
"thread/read",
{"threadId": thread_id, "includeTurns": include_turns},
@@ -461,16 +451,12 @@ class AppServerClient:
model = NOTIFICATION_MODELS.get(method)
if model is None:
return Notification(
method=method, payload=UnknownNotification(params=params_dict)
)
return Notification(method=method, payload=UnknownNotification(params=params_dict))
try:
payload = model.model_validate(params_dict)
except Exception: # noqa: BLE001
return Notification(
method=method, payload=UnknownNotification(params=params_dict)
)
return Notification(method=method, payload=UnknownNotification(params=params_dict))
return Notification(method=method, payload=payload)
def _normalize_input_items(
@@ -483,9 +469,7 @@ class AppServerClient:
return [input_items]
return input_items
def _default_approval_handler(
self, method: str, params: JsonObject | None
) -> JsonObject:
def _default_approval_handler(self, method: str, params: JsonObject | None) -> JsonObject:
"""Accept approval requests when the caller did not provide a handler."""
if method == "item/commandExecution/requestApproval":
return {"decision": "accept"}

View File

@@ -66,11 +66,7 @@ def _is_server_overloaded(data: Any) -> bool:
return data.lower() == "server_overloaded"
if isinstance(data, dict):
direct = (
data.get("codex_error_info")
or data.get("codexErrorInfo")
or data.get("errorInfo")
)
direct = data.get("codex_error_info") or data.get("codexErrorInfo") or data.get("errorInfo")
if isinstance(direct, str) and direct.lower() == "server_overloaded":
return True
if isinstance(direct, dict):

File diff suppressed because it is too large.

View File

@@ -12,7 +12,6 @@ from typing import Any
from openai_codex import AppServerConfig
Json = dict[str, Any]

View File

@@ -11,6 +11,7 @@ from app_server_harness import (
ev_response_created,
sse,
)
from openai_codex.generated.v2_all import (
AgentMessageDeltaNotification,
ItemCompletedNotification,

View File

@@ -3,9 +3,10 @@ from __future__ import annotations
import asyncio
from app_server_harness import AppServerHarness
from app_server_helpers import response_approval_policy
from openai_codex import ApprovalMode, AsyncCodex, Codex
from openai_codex.generated.v2_all import AskForApprovalValue, ThreadResumeParams
from app_server_helpers import response_approval_policy
def test_thread_resume_inherits_deny_all_approval_mode(tmp_path) -> None:

View File

@@ -1,9 +1,10 @@
from __future__ import annotations
from app_server_harness import AppServerHarness
from openai_codex import Codex, ImageInput, LocalImageInput, SkillInput, TextInput
from app_server_helpers import TINY_PNG_BYTES
from openai_codex import Codex, ImageInput, LocalImageInput, SkillInput, TextInput
def test_remote_image_input_reaches_responses_api(
tmp_path,
@@ -28,8 +29,7 @@ def test_remote_image_input_reaches_responses_api(
assert {
"final_response": result.final_response,
"contains_user_prompt": "Describe the remote image."
in request.message_input_texts("user"),
"contains_user_prompt": "Describe the remote image." in request.message_input_texts("user"),
"image_urls": request.message_image_urls("user"),
} == {
"final_response": "remote image received",
@@ -62,8 +62,7 @@ def test_local_image_input_reaches_responses_api(
assert {
"final_response": result.final_response,
"contains_user_prompt": "Describe the local image."
in request.message_input_texts("user"),
"contains_user_prompt": "Describe the local image." in request.message_input_texts("user"),
"image_url_is_png_data_url": request.message_image_urls("user")[-1].startswith(
"data:image/png;base64,"
),
@@ -81,9 +80,7 @@ def test_skill_input_injects_loaded_skill_body(tmp_path) -> None:
with AppServerHarness(tmp_path) as harness:
skill_file = harness.workspace / ".agents" / "skills" / "demo" / "SKILL.md"
skill_file.parent.mkdir(parents=True)
skill_file.write_text(
f"---\nname: demo\ndescription: demo skill\n---\n\n{skill_body}\n"
)
skill_file.write_text(f"---\nname: demo\ndescription: demo skill\n---\n\n{skill_body}\n")
skill_path = skill_file.resolve()
harness.responses.enqueue_assistant_message(
"skill received",
@@ -100,9 +97,7 @@ def test_skill_input_injects_loaded_skill_body(tmp_path) -> None:
request = harness.responses.single_request()
skill_blocks = [
text
for text in request.message_input_texts("user")
if text.startswith("<skill>")
text for text in request.message_input_texts("user") if text.startswith("<skill>")
]
assert {
"final_response": result.final_response,

View File

@@ -3,9 +3,10 @@ from __future__ import annotations
import asyncio
from app_server_harness import AppServerHarness
from openai_codex import AsyncCodex, Codex
from app_server_helpers import request_kind
from openai_codex import AsyncCodex, Codex
def _thread_message_summary(read_response) -> list[tuple[str, str]]:
"""Return persisted user/agent messages from a thread read response."""
@@ -58,9 +59,7 @@ def test_thread_list_filters_archived_threads(tmp_path) -> None:
expected_ids = {active_thread.id, archived_thread.id}
assert {
"active_ids": sorted(
thread.id for thread in active_list.data if thread.id in expected_ids
),
"active_ids": sorted(thread.id for thread in active_list.data if thread.id in expected_ids),
"archived_ids": sorted(
thread.id for thread in archived_list.data if thread.id in expected_ids
),

View File

@@ -3,7 +3,6 @@ from __future__ import annotations
import asyncio
import pytest
from app_server_harness import (
AppServerHarness,
ev_assistant_message,
@@ -13,13 +12,14 @@ from app_server_harness import (
ev_response_created,
sse,
)
from openai_codex import AsyncCodex, Codex
from openai_codex.generated.v2_all import MessagePhase
from app_server_helpers import (
agent_message_texts_from_items,
assistant_message_with_phase,
)
from openai_codex import AsyncCodex, Codex
from openai_codex.generated.v2_all import MessagePhase
def test_sync_thread_run_uses_mock_responses(
tmp_path,
@@ -250,9 +250,7 @@ def test_async_run_result_uses_last_unknown_phase_message(tmp_path) -> None:
)
async with AsyncCodex(config=harness.app_server_config()) as codex:
result = await (await codex.thread_start()).run(
"case: async last unknown phase"
)
result = await (await codex.thread_start()).run("case: async last unknown phase")
assert {
"final_response": result.final_response,
@@ -288,9 +286,7 @@ def test_async_run_result_does_not_promote_commentary_only_to_final(
)
async with AsyncCodex(config=harness.app_server_config()) as codex:
result = await (await codex.thread_start()).run(
"case: async commentary only"
)
result = await (await codex.thread_start()).run("case: async commentary only")
assert {
"final_response": result.final_response,

View File

@@ -3,12 +3,6 @@ from __future__ import annotations
import asyncio
from app_server_harness import AppServerHarness
from openai_codex import AsyncCodex, Codex, TextInput
from openai_codex.generated.v2_all import (
AgentMessageDeltaNotification,
TurnCompletedNotification,
TurnStatus,
)
from app_server_helpers import (
agent_message_texts,
next_async_delta,
@@ -16,13 +10,18 @@ from app_server_helpers import (
streaming_response,
)
from openai_codex import AsyncCodex, Codex, TextInput
from openai_codex.generated.v2_all import (
AgentMessageDeltaNotification,
TurnCompletedNotification,
TurnStatus,
)
def test_sync_stream_routes_text_deltas_and_completion(tmp_path) -> None:
"""A sync turn stream should expose deltas, completed items, and completion."""
with AppServerHarness(tmp_path) as harness:
harness.responses.enqueue_sse(
streaming_response("stream-1", "msg-stream-1", ["hel", "lo"])
)
harness.responses.enqueue_sse(streaming_response("stream-1", "msg-stream-1", ["he", "llo"]))
with Codex(config=harness.app_server_config()) as codex:
thread = codex.thread_start()
@@ -42,7 +41,7 @@ def test_sync_stream_routes_text_deltas_and_completion(tmp_path) -> None:
if isinstance(event.payload, TurnCompletedNotification)
],
} == {
"deltas": ["hel", "lo"],
"deltas": ["he", "llo"],
"agent_messages": ["hello"],
"completed_statuses": [TurnStatus.completed],
}

View File

@@ -1,9 +1,10 @@
from __future__ import annotations
from app_server_harness import AppServerHarness
from app_server_helpers import agent_message_texts, streaming_response
from openai_codex import Codex, TextInput
from openai_codex.generated.v2_all import TurnStatus
from app_server_helpers import agent_message_texts, streaming_response
def test_turn_steer_adds_follow_up_input(tmp_path) -> None:
@@ -30,9 +31,7 @@ def test_turn_steer_adds_follow_up_input(tmp_path) -> None:
"steered_turn_id": steer.turn_id,
"turn_id": turn.id,
"agent_messages": agent_message_texts(events),
"last_user_texts": [
request.message_input_texts("user")[-1] for request in requests
],
"last_user_texts": [request.message_input_texts("user")[-1] for request in requests],
} == {
"steered_turn_id": turn.id,
"turn_id": turn.id,

View File

@@ -5,12 +5,12 @@ import importlib.util
import io
import json
import sys
import tomllib
import urllib.error
from pathlib import Path
from typing import Sequence
import pytest
import tomllib
ROOT = Path(__file__).resolve().parents[1]
@@ -32,9 +32,7 @@ def _load_runtime_setup_module():
runtime_setup_path = ROOT / "_runtime_setup.py"
spec = importlib.util.spec_from_file_location("_runtime_setup", runtime_setup_path)
if spec is None or spec.loader is None:
raise AssertionError(
f"Failed to load runtime setup module: {runtime_setup_path}"
)
raise AssertionError(f"Failed to load runtime setup module: {runtime_setup_path}")
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
@@ -47,6 +45,40 @@ def test_generation_has_single_maintenance_entrypoint_script() -> None:
assert scripts == ["update_sdk_artifacts.py"]
def test_root_fmt_recipe_formats_rust_and_python_sdk() -> None:
"""The repo fmt command should work from Rust and Python SDK directories."""
justfile = ROOT.parents[1] / "justfile"
lines = justfile.read_text().splitlines()
fmt_index = lines.index("fmt:")
next_recipe_index = next(
index
for index in range(fmt_index + 1, len(lines))
if lines[index] and not lines[index].startswith((" ", "\t", "#"))
)
fmt_recipe = lines[fmt_index:next_recipe_index]
actual = {
"working_directory": lines[0],
"previous_attribute": lines[fmt_index - 1],
"commands": [line.strip() for line in fmt_recipe[1:] if line.strip()],
}
expected = {
"working_directory": 'set working-directory := "codex-rs"',
"previous_attribute": "# Format Rust and Python SDK code.",
"commands": [
"cargo fmt -- --config imports_granularity=Item 2>/dev/null",
"uv run --project ../sdk/python --extra dev ruff check --fix --fix-only ../sdk/python",
"uv run --project ../sdk/python --extra dev ruff format ../sdk/python",
],
}
assert actual == expected, (
"The root `just fmt` recipe must run Rust fmt and Python SDK Ruff. "
"Fix the `fmt` recipe in `justfile`, then run `just fmt`.\n"
f"Expected: {json.dumps(expected, indent=2)}\n"
f"Actual: {json.dumps(actual, indent=2)}"
)
def test_generate_types_wires_all_generation_steps() -> None:
"""The type generation command should refresh every schema-derived artifact."""
source = (ROOT / "scripts" / "update_sdk_artifacts.py").read_text()
@@ -56,8 +88,7 @@ def test_generate_types_wires_all_generation_steps() -> None:
(
node
for node in tree.body
if isinstance(node, ast.FunctionDef)
and node.name == "generate_types_from_schema_dir"
if isinstance(node, ast.FunctionDef) and node.name == "generate_types_from_schema_dir"
),
None,
)
@@ -94,8 +125,7 @@ def test_schema_normalization_only_flattens_string_literal_oneofs(
flattened = [
name
for name, definition in definitions.items()
if isinstance(definition, dict)
and script._flatten_string_enum_one_of(definition.copy())
if isinstance(definition, dict) and script._flatten_string_enum_one_of(definition.copy())
]
assert flattened == [
@@ -172,8 +202,7 @@ def test_examples_readme_points_to_runtime_version_source_of_truth() -> None:
def test_runtime_distribution_name_is_consistent() -> None:
script = _load_update_script_module()
runtime_setup = _load_runtime_setup_module()
from openai_codex import client as client_module
from openai_codex import _version
from openai_codex import _version, client as client_module
assert script.SDK_DISTRIBUTION_NAME == "openai-codex"
assert runtime_setup.SDK_PACKAGE_NAME == "openai-codex"
@@ -232,22 +261,6 @@ def test_release_metadata_retries_without_invalid_auth(
assert authorizations == ["Bearer invalid-token", None]
def test_source_sdk_package_pins_published_runtime() -> None:
"""The source package metadata should pin the runtime wheel that ships schemas."""
pyproject = tomllib.loads((ROOT / "pyproject.toml").read_text())
assert {
"sdk_version": pyproject["project"]["version"],
"dependencies": pyproject["project"]["dependencies"],
} == {
"sdk_version": "0.131.0a4",
"dependencies": [
"pydantic>=2.12",
"openai-codex-cli-bin==0.131.0a4",
],
}
def test_runtime_setup_uses_pep440_package_version_and_codex_release_tags() -> None:
"""The SDK uses PEP 440 package pins and converts only when fetching releases."""
runtime_setup = _load_runtime_setup_module()
@@ -259,17 +272,12 @@ def test_runtime_setup_uses_pep440_package_version_and_codex_release_tags() -> N
f"{runtime_setup.PACKAGE_NAME}=={pyproject['project']['version']}"
in pyproject["project"]["dependencies"]
)
assert (
runtime_setup._normalized_package_version("rust-v0.116.0-alpha.1")
== "0.116.0a1"
)
assert runtime_setup._normalized_package_version("rust-v0.116.0-alpha.1") == "0.116.0a1"
assert runtime_setup._release_tag("0.116.0a1") == "rust-v0.116.0-alpha.1"
def test_runtime_package_is_wheel_only_and_builds_platform_specific_wheels() -> None:
pyproject = tomllib.loads(
(ROOT.parent / "python-runtime" / "pyproject.toml").read_text()
)
pyproject = tomllib.loads((ROOT.parent / "python-runtime" / "pyproject.toml").read_text())
hook_source = (ROOT.parent / "python-runtime" / "hatch_build.py").read_text()
hook_tree = ast.parse(hook_source)
initialize_fn = next(
@@ -411,9 +419,7 @@ def test_stage_runtime_release_copies_resource_binaries(tmp_path: Path) -> None:
)
assert {
path.relative_to(
staged / "src" / "codex_cli_bin" / "bin"
).as_posix(): path.read_text()
path.relative_to(staged / "src" / "codex_cli_bin" / "bin").as_posix(): path.read_text()
for path in (staged / "src" / "codex_cli_bin" / "bin").iterdir()
} == {
script.runtime_binary_name(): "fake codex\n",
@@ -502,9 +508,7 @@ def test_staged_sdk_and_runtime_versions_match(tmp_path: Path) -> None:
sdk_pyproject = tomllib.loads((sdk_stage / "pyproject.toml").read_text())
runtime_pyproject = tomllib.loads((runtime_stage / "pyproject.toml").read_text())
assert (
sdk_pyproject["project"]["version"] == runtime_pyproject["project"]["version"]
)
assert sdk_pyproject["project"]["version"] == runtime_pyproject["project"]["version"]
assert sdk_pyproject["project"]["dependencies"] == [
"pydantic>=2.12",
"openai-codex-cli-bin==0.116.0a1",
@@ -629,9 +633,7 @@ def test_stage_runtime_stages_binary_without_type_generation(tmp_path: Path) ->
script.run_command(args, ops)
assert calls == [
"stage_runtime:0.116.0a1:musllinux_1_1_x86_64:helper,fallback-helper"
]
assert calls == ["stage_runtime:0.116.0a1:musllinux_1_1_x86_64:helper,fallback-helper"]
def test_default_runtime_is_resolved_from_installed_runtime_package(

View File

@@ -12,6 +12,7 @@ from openai_codex.models import Notification, UnknownNotification
def test_async_client_allows_concurrent_transport_calls() -> None:
"""Async wrappers should offload sync calls so concurrent awaits can overlap."""
async def scenario() -> int:
"""Run two blocking sync calls and report peak overlap."""
client = AsyncAppServerClient()
@@ -36,6 +37,7 @@ def test_async_client_allows_concurrent_transport_calls() -> None:
def test_async_client_turn_notification_methods_delegate_to_sync_client() -> None:
"""Async turn routing methods should preserve sync-client registration semantics."""
async def scenario() -> tuple[list[tuple[str, str]], Notification, str]:
"""Record the sync-client calls made by async turn notification wrappers."""
client = AsyncAppServerClient()

View File

@@ -111,9 +111,7 @@ def test_unknown_notifications_fall_back_to_unknown_payloads() -> None:
def test_invalid_notification_payload_falls_back_to_unknown() -> None:
client = AppServerClient()
event = client._coerce_notification(
"thread/tokenUsage/updated", {"threadId": "missing"}
)
event = client._coerce_notification("thread/tokenUsage/updated", {"threadId": "missing"})
assert event.method == "thread/tokenUsage/updated"
assert isinstance(event.payload, UnknownNotification)

View File

@@ -31,10 +31,7 @@ def _snapshot_target(root: Path, rel_path: Path) -> dict[str, bytes] | bytes | N
def _snapshot_targets(root: Path) -> dict[str, dict[str, bytes] | bytes | None]:
"""Capture all checked-in generated artifacts before and after regeneration."""
return {
str(rel_path): _snapshot_target(root, rel_path)
for rel_path in GENERATED_TARGETS
}
return {str(rel_path): _snapshot_target(root, rel_path) for rel_path in GENERATED_TARGETS}
def test_generated_files_are_up_to_date():

View File

@@ -7,13 +7,13 @@ from typing import Any
import pytest
import openai_codex.api as public_api_module
from openai_codex.generated.v2_all import TurnStartParams
from openai_codex.models import InitializeResponse
from openai_codex.api import (
ApprovalMode,
AsyncCodex,
Codex,
)
from openai_codex.generated.v2_all import TurnStartParams
from openai_codex.models import InitializeResponse
ROOT = Path(__file__).resolve().parents[1]
@@ -129,9 +129,7 @@ def test_async_codex_initializes_only_once_under_concurrency() -> None:
def _approval_mode_turn_params(approval_mode: ApprovalMode) -> TurnStartParams:
"""Build real generated turn params from one public approval mode."""
approval_policy, approvals_reviewer = public_api_module._approval_mode_settings(
approval_mode
)
approval_policy, approvals_reviewer = public_api_module._approval_mode_settings(approval_mode)
return TurnStartParams(
thread_id="thread-1",
input=[],

View File

@@ -2,15 +2,16 @@ from __future__ import annotations
import importlib.resources as resources
import inspect
import tomllib
from pathlib import Path
from typing import Any
import tomllib
import openai_codex
import openai_codex.types as public_types
from openai_codex import (
AppServerConfig,
ApprovalMode,
AppServerConfig,
AsyncCodex,
AsyncThread,
Codex,
@@ -107,9 +108,7 @@ def _assert_no_any_annotations(fn: object) -> None:
signature = inspect.signature(fn)
for param in signature.parameters.values():
if param.annotation is Any:
raise AssertionError(
f"{fn} has public parameter typed as Any: {param.name}"
)
raise AssertionError(f"{fn} has public parameter typed as Any: {param.name}")
if signature.return_annotation is Any:
raise AssertionError(f"{fn} has public return annotation typed as Any")
@@ -150,9 +149,9 @@ def test_package_includes_py_typed_marker() -> None:
def test_package_root_exports_only_public_api() -> None:
"""The package root should expose the supported SDK surface, not internals."""
assert openai_codex.__all__ == EXPECTED_ROOT_EXPORTS
assert {name: hasattr(openai_codex, name) for name in EXPECTED_ROOT_EXPORTS} == {
name: True for name in EXPECTED_ROOT_EXPORTS
}
assert {name: hasattr(openai_codex, name) for name in EXPECTED_ROOT_EXPORTS} == dict.fromkeys(
EXPECTED_ROOT_EXPORTS, True
)
assert {
"AppServerClient": hasattr(openai_codex, "AppServerClient"),
"AsyncAppServerClient": hasattr(openai_codex, "AsyncAppServerClient"),
@@ -184,9 +183,9 @@ def test_package_star_import_matches_public_api() -> None:
def test_types_module_exports_curated_public_types() -> None:
"""The public type module should be the supported place for app-server models."""
assert public_types.__all__ == EXPECTED_TYPES_EXPORTS
assert {name: hasattr(public_types, name) for name in EXPECTED_TYPES_EXPORTS} == {
name: True for name in EXPECTED_TYPES_EXPORTS
}
assert {name: hasattr(public_types, name) for name in EXPECTED_TYPES_EXPORTS} == dict.fromkeys(
EXPECTED_TYPES_EXPORTS, True
)
def test_types_star_import_matches_public_types() -> None:
@@ -390,9 +389,9 @@ def test_new_thread_methods_default_to_auto_review() -> None:
AsyncCodex.thread_start,
]
assert {fn: _keyword_default(fn, "approval_mode") for fn in funcs} == {
fn: ApprovalMode.auto_review for fn in funcs
}
assert {fn: _keyword_default(fn, "approval_mode") for fn in funcs} == dict.fromkeys(
funcs, ApprovalMode.auto_review
)
def test_existing_thread_methods_default_to_preserving_approval_settings() -> None:
@@ -408,9 +407,7 @@ def test_existing_thread_methods_default_to_preserving_approval_settings() -> No
AsyncThread.run,
]
assert {fn: _keyword_default(fn, "approval_mode") for fn in funcs} == {
fn: None for fn in funcs
}
assert {fn: _keyword_default(fn, "approval_mode") for fn in funcs} == dict.fromkeys(funcs)
def test_lifecycle_methods_are_codex_scoped() -> None:
@@ -462,6 +459,4 @@ def test_initialize_metadata_requires_non_empty_information() -> None:
except RuntimeError as exc:
assert "missing required metadata" in str(exc)
else:
raise AssertionError(
"expected RuntimeError when initialize metadata is missing"
)
raise AssertionError("expected RuntimeError when initialize metadata is missing")

View File

@@ -539,7 +539,9 @@ def test_real_examples_run_and_assert(
assert "actions:" in out
assert "Items:" in out
elif folder == "13_model_select_and_turn_params":
assert "selected.model:" in out and "agent.message.params:" in out and "items.params:" in out
assert (
"selected.model:" in out and "agent.message.params:" in out and "items.params:" in out
)
elif folder == "14_turn_controls":
assert "steer.result:" in out and "steer.final.status:" in out
assert "interrupt.result:" in out and "interrupt.final.status:" in out

sdk/python/uv.lock (generated, 2 changes)
View File

@@ -302,7 +302,7 @@ requires-dist = [
{ name = "openai-codex-cli-bin", specifier = "==0.131.0a4" },
{ name = "pydantic", specifier = ">=2.12" },
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0" },
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.11" },
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.15.8" },
]
provides-extras = ["dev"]