{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Codex Python SDK Walkthrough\n", "\n", "Public SDK surface only (`openai_codex` root exports)." ] }, { "cell_type": "code", "execution_count": null, "id": "1b6614a5", "metadata": {}, "outputs": [], "source": [ "# Cell 1: bootstrap local SDK imports + pinned runtime package\n", "import os\n", "import sys\n", "from pathlib import Path\n", "\n", "if sys.version_info < (3, 10):\n", " raise RuntimeError(\n", " f'Notebook requires Python 3.10+; current interpreter is {sys.version.split()[0]}.'\n", " )\n", "\n", "def _is_sdk_python_dir(path: Path) -> bool:\n", " return (path / 'pyproject.toml').exists() and (path / 'src' / 'openai_codex').exists()\n", "\n", "\n", "def _find_sdk_python_dir(start: Path) -> Path | None:\n", " checked = set()\n", "\n", " def _consider(candidate: Path) -> Path | None:\n", " resolved = candidate.resolve()\n", " if resolved in checked:\n", " return None\n", " checked.add(resolved)\n", " if _is_sdk_python_dir(resolved):\n", " return resolved\n", " return None\n", "\n", " for candidate in [start, *start.parents]:\n", " found = _consider(candidate)\n", " if found is not None:\n", " return found\n", "\n", " for candidate in [start / 'sdk' / 'python', *(parent / 'sdk' / 'python' for parent in start.parents)]:\n", " found = _consider(candidate)\n", " if found is not None:\n", " return found\n", "\n", " env_dir = os.environ.get('CODEX_PYTHON_SDK_DIR')\n", " if env_dir:\n", " found = _consider(Path(env_dir).expanduser())\n", " if found is not None:\n", " return found\n", "\n", " return None\n", "\n", "\n", "repo_python_dir = _find_sdk_python_dir(Path.cwd())\n", "if repo_python_dir is None:\n", " raise RuntimeError('Could not locate sdk/python. Set CODEX_PYTHON_SDK_DIR to your sdk/python path.')\n", "\n", "repo_python_str = str(repo_python_dir)\n", "if repo_python_str not in sys.path:\n", " sys.path.insert(0, repo_python_str)\n", "\n", "from _runtime_setup import ensure_runtime_package_installed\n", "\n", "runtime_version = ensure_runtime_package_installed(\n", " sys.executable,\n", " repo_python_dir,\n", ")\n", "\n", "src_dir = repo_python_dir / 'src'\n", "examples_dir = repo_python_dir / 'examples'\n", "src_str = str(src_dir)\n", "examples_str = str(examples_dir)\n", "if src_str not in sys.path:\n", " sys.path.insert(0, src_str)\n", "if examples_str not in sys.path:\n", " sys.path.insert(0, examples_str)\n", "\n", "# Force fresh imports after SDK upgrades in the same notebook kernel.\n", "for module_name in list(sys.modules):\n", " if module_name == 'openai_codex' or module_name.startswith('openai_codex.'):\n", " sys.modules.pop(module_name, None)\n", "\n", "print('Kernel:', sys.executable)\n", "print('SDK source:', src_dir)\n", "print('Runtime package:', runtime_version)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "137a6d64", "metadata": {}, "outputs": [], "source": [ "# Cell 2: imports (public only)\n", "from _bootstrap import server_label\n", "from openai_codex import (\n", " AsyncCodex,\n", " Codex,\n", " ImageInput,\n", " LocalImageInput,\n", " TextInput,\n", " retry_on_overload,\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "5fae892d", "metadata": {}, "outputs": [], "source": [ "# Cell 2b: browser login handle lifecycle\n", "with Codex() as codex:\n", " # Open this URL and call `wait()` without canceling when completing login for real.\n", " login = codex.login_chatgpt()\n", " print('Please complete login at:', login.auth_url)\n", " completed = login.wait()\n", " account = codex.account()\n", "\n", " print('login.id:', login.login_id)\n", " print('login.auth_url:', login.auth_url)\n", " print('login.completed.success:', completed.success)\n", " print('account:', account.email)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "ebdc04d9", "metadata": {}, "outputs": [], "source": [ "# Cell 3: simple sync conversation\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.run('Explain gradient descent in 3 bullets.')\n", " print(result.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "bb4abb96", "metadata": {}, "outputs": [], "source": [ "# Cell 4: multi-turn continuity in same thread\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = thread.turn('Give a short summary of transformers.').run()\n", " second = thread.turn('Now explain that to a high-school student.').run()\n", " print('first status:', first.status)\n", " print('second status:', second.status)\n", " print('second text:', second.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "8b0c80fd", "metadata": {}, "outputs": [], "source": [ "# Cell 5: full thread lifecycle and branching (sync)\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = thread.turn('One sentence about structured planning.').run()\n", " second = thread.turn('Now restate it for a junior engineer.').run()\n", "\n", " reopened = codex.thread_resume(thread.id)\n", " listing_active = codex.thread_list(limit=20, archived=False)\n", " reading = reopened.read(include_turns=True)\n", "\n", " _ = reopened.set_name('sdk-lifecycle-demo')\n", " _ = codex.thread_archive(reopened.id)\n", " listing_archived = codex.thread_list(limit=20, archived=True)\n", " unarchived = codex.thread_unarchive(reopened.id)\n", "\n", " resumed = codex.thread_resume(\n", " unarchived.id,\n", " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = resumed.turn('Continue in one short sentence.').run()\n", "\n", " forked = codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = forked.turn('Take a different angle in one short sentence.').run()\n", "\n", " compact_result = unarchived.compact()\n", "\n", " print('Lifecycle OK:', thread.id)\n", " print('first:', first.id, first.status)\n", " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", " print('resumed:', resumed_result.id, resumed_result.status)\n", " print('forked:', forked_result.id, forked_result.status)\n", " print('compact:', compact_result.model_dump(mode='json', by_alias=True))\n" ] }, { "cell_type": "code", "execution_count": null, "id": "310db8c0", "metadata": {}, "outputs": [], "source": [ "# Cell 5b: one turn with most optional turn params\n", "from pathlib import Path\n", "from openai_codex import (\n", " Personality,\n", " ReasoningEffort,\n", " ReasoningSummary,\n", " SandboxPolicy,\n", ")\n", "\n", "output_schema = {\n", " 'type': 'object',\n", " 'properties': {\n", " 'summary': {'type': 'string'},\n", " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", " },\n", " 'required': ['summary', 'actions'],\n", " 'additionalProperties': False,\n", "}\n", "\n", "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", "summary = ReasoningSummary.model_validate('concise')\n", "\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(\n", " 'Propose a safe production feature-flag rollout. Return JSON matching the schema.',\n", " cwd=str(Path.cwd()),\n", " effort=ReasoningEffort.medium,\n", " model='gpt-5.4',\n", " output_schema=output_schema,\n", " personality=Personality.pragmatic,\n", " sandbox_policy=sandbox_policy,\n", " summary=summary,\n", " )\n", " result = turn.run()\n", " print('status:', result.status)\n", " print(result.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "7a33c97d", "metadata": {}, "outputs": [], "source": [ "# Cell 5c: choose highest model + highest supported reasoning, then run turns\n", "from pathlib import Path\n", "from openai_codex import (\n", " Personality,\n", " ReasoningEffort,\n", " ReasoningSummary,\n", " SandboxPolicy,\n", ")\n", "\n", "reasoning_rank = {\n", " 'none': 0,\n", " 'minimal': 1,\n", " 'low': 2,\n", " 'medium': 3,\n", " 'high': 4,\n", " 'xhigh': 5,\n", "}\n", "\n", "\n", "def pick_highest_model(models):\n", " visible = [m for m in models if not m.hidden]\n", " if not visible:\n", " raise RuntimeError('models response did not include visible models')\n", " known_names = {m.id for m in visible} | {m.model for m in visible}\n", " top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]\n", " if not top_candidates:\n", " raise RuntimeError('models response did not include top-level visible models')\n", " return max(top_candidates, key=lambda m: (m.model, m.id))\n", "\n", "\n", "def pick_highest_turn_effort(model) -> ReasoningEffort:\n", " if not model.supported_reasoning_efforts:\n", " raise RuntimeError(f'{model.model} did not advertise supported reasoning efforts')\n", " best = max(model.supported_reasoning_efforts, key=lambda opt: reasoning_rank[opt.reasoning_effort.value])\n", " return ReasoningEffort(best.reasoning_effort.value)\n", "\n", "\n", "output_schema = {\n", " 'type': 'object',\n", " 'properties': {\n", " 'summary': {'type': 'string'},\n", " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", " },\n", " 'required': ['summary', 'actions'],\n", " 'additionalProperties': False,\n", "}\n", "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", "\n", "with Codex() as codex:\n", " models = codex.models(include_hidden=True)\n", " selected_model = pick_highest_model(models.data)\n", " selected_effort = pick_highest_turn_effort(selected_model)\n", "\n", " print('selected.model:', selected_model.model)\n", " print('selected.effort:', selected_effort.value)\n", "\n", " thread = codex.thread_start(model=selected_model.model, config={'model_reasoning_effort': selected_effort.value})\n", "\n", " first = thread.turn(\n", " 'Give one short sentence about reliable production releases.',\n", " model=selected_model.model,\n", " effort=selected_effort,\n", " ).run()\n", " print('agent.message:', first.final_response)\n", " print('items:', len(first.items))\n", "\n", " second = thread.turn(\n", " 'Return JSON for a safe feature-flag rollout plan.',\n", " cwd=str(Path.cwd()),\n", " effort=selected_effort,\n", " model=selected_model.model,\n", " output_schema=output_schema,\n", " personality=Personality.pragmatic,\n", " sandbox_policy=sandbox_policy,\n", " summary=ReasoningSummary.model_validate('concise'),\n", " ).run()\n", " print('agent.message.params:', second.final_response)\n", " print('items.params:', len(second.items))\n" ] }, { "cell_type": "code", "execution_count": null, "id": "e9aef26a", "metadata": {}, "outputs": [], "source": [ "# Cell 6: multimodal with remote image\n", "remote_image_url = 'https://raw.githubusercontent.com/github/explore/main/topics/python/python.png'\n", "\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('What do you see in this image? 3 bullets.'),\n", " ImageInput(remote_image_url),\n", " ]).run()\n", " print('status:', result.status)\n", " print(result.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "a0cecc6c", "metadata": {}, "outputs": [], "source": [ "# Cell 7: multimodal with local image (generated temporary file)\n", "with temporary_sample_image_path() as local_image_path:\n", " with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('Describe the colors and layout in this generated local image in 2 bullets.'),\n", " LocalImageInput(str(local_image_path.resolve())),\n", " ]).run()\n", " print('status:', result.status)\n", " print(result.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "91afa2b8", "metadata": {}, "outputs": [], "source": [ "# Cell 8: retry-on-overload pattern\n", "with Codex() as codex:\n", " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", "\n", " result = retry_on_overload(\n", " lambda: thread.turn('List 5 failure modes in distributed systems.').run(),\n", " max_attempts=3,\n", " initial_delay_s=0.25,\n", " max_delay_s=2.0,\n", " )\n", " print('status:', result.status)\n", " print(result.final_response)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "103be934", "metadata": {}, "outputs": [], "source": [ "# Cell 9: full thread lifecycle and branching (async)\n", "import asyncio\n", "\n", "\n", "async def async_lifecycle_demo():\n", " async with AsyncCodex() as codex:\n", " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = await (await thread.turn('One sentence about structured planning.')).run()\n", " second = await (await thread.turn('Now restate it for a junior engineer.')).run()\n", "\n", " reopened = await codex.thread_resume(thread.id)\n", " listing_active = await codex.thread_list(limit=20, archived=False)\n", " reading = await reopened.read(include_turns=True)\n", "\n", " _ = await reopened.set_name('sdk-lifecycle-demo')\n", " _ = await codex.thread_archive(reopened.id)\n", " listing_archived = await codex.thread_list(limit=20, archived=True)\n", " unarchived = await codex.thread_unarchive(reopened.id)\n", "\n", " resumed = await codex.thread_resume(\n", " unarchived.id,\n", " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = await (await resumed.turn('Continue in one short sentence.')).run()\n", "\n", " forked = await codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = await (await forked.turn('Take a different angle in one short sentence.')).run()\n", "\n", " compact_result = await unarchived.compact()\n", "\n", " print('Lifecycle OK:', thread.id)\n", " print('first:', first.id, first.status)\n", " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", " print('resumed:', resumed_result.id, resumed_result.status)\n", " print('forked:', forked_result.id, forked_result.status)\n", " print('compact:', compact_result.model_dump(mode='json', by_alias=True))\n", "\n", "\n", "await async_lifecycle_demo()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "365aa10c", "metadata": {}, "outputs": [], "source": [ "# Cell 10: async turn controls (steer + interrupt)\n", "import asyncio\n", "\n", "\n", "async def async_stream_demo():\n", " async with AsyncCodex() as codex:\n", " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " steer_turn = await thread.turn('Count from 1 to 40 with commas, then one summary sentence.')\n", "\n", " steer_result = await steer_turn.steer('Keep it brief and stop after 10 numbers.')\n", "\n", " steer_event_count = 0\n", " steer_completed_status = None\n", " steer_deltas = []\n", " async for event in steer_turn.stream():\n", " steer_event_count += 1\n", " if event.method == 'item/agentMessage/delta':\n", " steer_deltas.append(event.payload.delta)\n", " continue\n", " if event.method == 'turn/completed':\n", " steer_completed_status = event.payload.turn.status.value\n", "\n", " if steer_completed_status is None:\n", " raise RuntimeError('stream ended without turn/completed')\n", " steer_preview = ''.join(steer_deltas).strip()\n", "\n", " interrupt_turn = await thread.turn('Count from 1 to 200 with commas, then one summary sentence.')\n", " interrupt_result = await interrupt_turn.interrupt()\n", "\n", " interrupt_event_count = 0\n", " interrupt_completed_status = None\n", " interrupt_deltas = []\n", " async for event in interrupt_turn.stream():\n", " interrupt_event_count += 1\n", " if event.method == 'item/agentMessage/delta':\n", " interrupt_deltas.append(event.payload.delta)\n", " continue\n", " if event.method == 'turn/completed':\n", " interrupt_completed_status = event.payload.turn.status.value\n", "\n", " if interrupt_completed_status is None:\n", " raise RuntimeError('stream ended without turn/completed')\n", " interrupt_preview = ''.join(interrupt_deltas).strip()\n", "\n", " print('steer.result:', steer_result.model_dump(mode='json', by_alias=True))\n", " print('steer.final.status:', steer_completed_status)\n", " print('steer.events.count:', steer_event_count)\n", " print('steer.assistant.preview:', steer_preview)\n", " print('interrupt.result:', interrupt_result.model_dump(mode='json', by_alias=True))\n", " print('interrupt.final.status:', interrupt_completed_status)\n", " print('interrupt.events.count:', interrupt_event_count)\n", " print('interrupt.assistant.preview:', interrupt_preview)\n", "\n", "\n", "await async_stream_demo()\n" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.14.3" } }, "nbformat": 4, "nbformat_minor": 5 }