Mirror of https://github.com/openai/codex.git, synced 2026-05-16 01:02:48 +00:00
Focus Python SDK approval mode
Default high-level thread and turn starts to auto-review, keep deny_all as the explicit opt-out, and remove the generated AskForApproval alias customization.

Co-authored-by: Codex <noreply@openai.com>
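For orientation, here is a minimal sketch of the behaviour the commit message describes, stitched together from calls that appear in the diff below. It is not verified SDK documentation: ApprovalMode.deny_all is taken from the commit message, the _bootstrap.runtime_config() helper comes from the example files, and it assumes the other turn keyword arguments (output_schema, personality, summary) are optional.

from _bootstrap import runtime_config  # examples-local helper, mirrors the files changed below
from openai_codex import ApprovalMode, Codex, TextInput

with Codex(config=runtime_config()) as codex:
    thread = codex.thread_start(model="gpt-5.4")

    # New default described by the commit: high-level thread/turn starts
    # behave as auto-review, so no approval_mode argument is needed here.
    reviewed = thread.turn(TextInput("Plan a safe feature-flag rollout."))

    # deny_all stays available as the explicit opt-out from auto-review
    # (member name assumed from the commit message).
    locked_down = thread.turn(
        TextInput("Plan a safe feature-flag rollout."),
        approval_mode=ApprovalMode.deny_all,
    )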
@@ -18,7 +18,6 @@ ensure_local_sdk_src()
import asyncio

from openai_codex import (
    ApprovalMode,
    AsyncCodex,
    TextInput,
)
@@ -46,18 +45,14 @@ PROMPT = (
    "Analyze a safe rollout plan for enabling a feature flag in production. "
    "Return JSON matching the requested schema."
)
APPROVAL_MODE = ApprovalMode.auto_review


async def main() -> None:
    async with AsyncCodex(config=runtime_config()) as codex:
        thread = await codex.thread_start(
            model="gpt-5.4", config={"model_reasoning_effort": "high"}
        )
        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})

        turn = await thread.turn(
            TextInput(PROMPT),
            approval_mode=APPROVAL_MODE,
            output_schema=OUTPUT_SCHEMA,
            personality=Personality.pragmatic,
            summary=SUMMARY,
@@ -69,16 +64,12 @@ async def main() -> None:
        try:
            structured = json.loads(structured_text)
        except json.JSONDecodeError as exc:
            raise RuntimeError(
                f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
            ) from exc
            raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc

        summary = structured.get("summary")
        actions = structured.get("actions")
        if (
            not isinstance(summary, str)
            or not isinstance(actions, list)
            or not all(isinstance(action, str) for action in actions)
        if not isinstance(summary, str) or not isinstance(actions, list) or not all(
            isinstance(action, str) for action in actions
        ):
            raise RuntimeError(
                f"Expected structured output with string summary/actions, got: {structured!r}"
@@ -89,9 +80,7 @@ async def main() -> None:
        print("actions:")
        for action in actions:
            print("-", action)
        print(
            "Items:", 0 if persisted_turn is None else len(persisted_turn.items or [])
        )
        print("Items:", 0 if persisted_turn is None else len(persisted_turn.items or []))


if __name__ == "__main__":

@@ -16,7 +16,6 @@ from _bootstrap import (
ensure_local_sdk_src()

from openai_codex import (
    ApprovalMode,
    Codex,
    TextInput,
)
@@ -44,16 +43,12 @@ PROMPT = (
    "Analyze a safe rollout plan for enabling a feature flag in production. "
    "Return JSON matching the requested schema."
)
APPROVAL_MODE = ApprovalMode.auto_review

with Codex(config=runtime_config()) as codex:
    thread = codex.thread_start(
        model="gpt-5.4", config={"model_reasoning_effort": "high"}
    )
    thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})

    turn = thread.turn(
        TextInput(PROMPT),
        approval_mode=APPROVAL_MODE,
        output_schema=OUTPUT_SCHEMA,
        personality=Personality.pragmatic,
        summary=SUMMARY,
@@ -65,20 +60,14 @@ with Codex(config=runtime_config()) as codex:
    try:
        structured = json.loads(structured_text)
    except json.JSONDecodeError as exc:
        raise RuntimeError(
            f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
        ) from exc
        raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc

    summary = structured.get("summary")
    actions = structured.get("actions")
    if (
        not isinstance(summary, str)
        or not isinstance(actions, list)
        or not all(isinstance(action, str) for action in actions)
    if not isinstance(summary, str) or not isinstance(actions, list) or not all(
        isinstance(action, str) for action in actions
    ):
        raise RuntimeError(
            f"Expected structured output with string summary/actions, got: {structured!r}"
        )
        raise RuntimeError(f"Expected structured output with string summary/actions, got: {structured!r}")

    print("Status:", result.status)
    print("summary:", summary)

@@ -5,19 +5,13 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
    sys.path.insert(0, str(_EXAMPLES_ROOT))

from _bootstrap import (
    assistant_text_from_turn,
    ensure_local_sdk_src,
    find_turn_by_id,
    runtime_config,
)
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config

ensure_local_sdk_src()

import asyncio

from openai_codex import (
    ApprovalMode,
    AsyncCodex,
    TextInput,
)
@@ -41,16 +35,11 @@ PREFERRED_MODEL = "gpt-5.4"

def _pick_highest_model(models):
    visible = [m for m in models if not m.hidden] or models
    preferred = next(
        (m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL),
        None,
    )
    preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
    if preferred is not None:
        return preferred
    known_names = {m.id for m in visible} | {m.model for m in visible}
    top_candidates = [
        m for m in visible if not (m.upgrade and m.upgrade in known_names)
    ]
    top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]
    pool = top_candidates or visible
    return max(pool, key=lambda m: (m.model, m.id))

@@ -85,7 +74,6 @@ SANDBOX_POLICY = SandboxPolicy.model_validate(
        "access": {"type": "fullAccess"},
    }
)
APPROVAL_MODE = ApprovalMode.auto_review


async def main() -> None:
@@ -112,16 +100,10 @@ async def main() -> None:
        first_persisted_turn = find_turn_by_id(persisted.thread.turns, first.id)

        print("agent.message:", assistant_text_from_turn(first_persisted_turn))
        print(
            "items:",
            0
            if first_persisted_turn is None
            else len(first_persisted_turn.items or []),
        )
        print("items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or []))

        second_turn = await thread.turn(
            TextInput("Return JSON for a safe feature-flag rollout plan."),
            approval_mode=APPROVAL_MODE,
            cwd=str(Path.cwd()),
            effort=selected_effort,
            model=selected_model.model,
@@ -135,12 +117,7 @@ async def main() -> None:
        second_persisted_turn = find_turn_by_id(persisted.thread.turns, second.id)

        print("agent.message.params:", assistant_text_from_turn(second_persisted_turn))
        print(
            "items.params:",
            0
            if second_persisted_turn is None
            else len(second_persisted_turn.items or []),
        )
        print("items.params:", 0 if second_persisted_turn is None else len(second_persisted_turn.items or []))


if __name__ == "__main__":

@@ -5,17 +5,11 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
if str(_EXAMPLES_ROOT) not in sys.path:
    sys.path.insert(0, str(_EXAMPLES_ROOT))

from _bootstrap import (
    assistant_text_from_turn,
    ensure_local_sdk_src,
    find_turn_by_id,
    runtime_config,
)
from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config

ensure_local_sdk_src()

from openai_codex import (
    ApprovalMode,
    Codex,
    TextInput,
)
@@ -39,16 +33,11 @@ PREFERRED_MODEL = "gpt-5.4"

def _pick_highest_model(models):
    visible = [m for m in models if not m.hidden] or models
    preferred = next(
        (m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL),
        None,
    )
    preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
    if preferred is not None:
        return preferred
    known_names = {m.id for m in visible} | {m.model for m in visible}
    top_candidates = [
        m for m in visible if not (m.upgrade and m.upgrade in known_names)
    ]
    top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]
    pool = top_candidates or visible
    return max(pool, key=lambda m: (m.model, m.id))

@@ -83,7 +72,6 @@ SANDBOX_POLICY = SandboxPolicy.model_validate(
        "access": {"type": "fullAccess"},
    }
)
APPROVAL_MODE = ApprovalMode.auto_review


with Codex(config=runtime_config()) as codex:
@@ -112,7 +100,6 @@ with Codex(config=runtime_config()) as codex:

    second = thread.turn(
        TextInput("Return JSON for a safe feature-flag rollout plan."),
        approval_mode=APPROVAL_MODE,
        cwd=str(Path.cwd()),
        effort=selected_effort,
        model=selected_model.model,