mirror of https://github.com/openai/codex.git, synced 2026-05-16 09:12:54 +00:00
[8/8] Add Python SDK Ruff formatting (#22021)
## Why

The Python SDK needs the same tight formatter/lint loop as the rest of the repo: a safe Ruff autofix pass, Ruff formatting, editor save behavior, and CI checks that catch drift. Without that loop, SDK changes can land with formatting or import ordering that differs from what reviewers and CI expect.

## What

- Add Ruff configuration to `sdk/python/pyproject.toml`, excluding generated protocol code and notebooks from the normal lint/format pass.
- Update `just fmt` so it still formats Rust and also runs Python SDK Ruff autofix and formatting.
- Add Python SDK CI steps for `ruff check` and `ruff format --check` before pytest.
- Recommend the Ruff VS Code extension and enable Python format/fix/organize-on-save so Cmd+S uses the same tooling.
- Apply the resulting Ruff formatting to SDK Python files, examples, and the checked-in generated `v2_all.py` output emitted by the pinned generator.
- Add a guard test for the `just fmt` recipe so it keeps working from both Rust and Python SDK working directories.

## Stack

1. #21891 `[1/8]` Pin Python SDK runtime dependency
2. #21893 `[2/8]` Generate Python SDK types from pinned runtime
3. #21895 `[3/8]` Run Python SDK tests in CI
4. #21896 `[4/8]` Define Python SDK public API surface
5. #21905 `[5/8]` Rename Python SDK package to `openai-codex`
6. #21910 `[6/8]` Add high-level Python SDK approval mode
7. #22014 `[7/8]` Add Python SDK app-server integration harness
8. This PR `[8/8]` Add Python SDK Ruff formatting

## Verification

- Added `test_root_fmt_recipe_formats_rust_and_python_sdk` for the shared format recipe.
- Ran `just fmt` after the recipe update.

---------

Co-authored-by: Codex <noreply@openai.com>
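The Verification section names a guard test for the shared `just fmt` recipe. As a rough sketch of how such a guard could be structured (the `repo_root` fixture, the subprocess invocation, and the helper are assumptions for illustration, not the repo's actual implementation; only the test name, the `just fmt` recipe, and the `sdk/python` path come from the PR itself):

```python
# Hypothetical sketch of the guard test named above.
import subprocess
from pathlib import Path


def _run_fmt(cwd: Path) -> None:
    # `just` searches parent directories for the root justfile, so the
    # shared recipe should succeed regardless of where it is invoked from.
    subprocess.run(["just", "fmt"], cwd=cwd, check=True)


def test_root_fmt_recipe_formats_rust_and_python_sdk(repo_root: Path) -> None:
    _run_fmt(repo_root)  # from the Rust workspace root
    _run_fmt(repo_root / "sdk" / "python")  # from the Python SDK directory
```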
@@ -22,7 +22,9 @@ async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
         print("Server:", server_label(codex.metadata))
 
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
         result = await thread.run("Say hello in one sentence.")
         print("Items:", len(result.items))
         print("Text:", result.final_response)

@@ -21,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
         turn = await thread.turn(TextInput("Give 3 bullets about SIMD."))
         result = await turn.run()
         persisted = await thread.read(include_turns=True)

@@ -21,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
         turn = await thread.turn(TextInput("Explain SIMD in 3 short bullets."))
 
         event_count = 0

@@ -44,7 +46,9 @@ async def main() -> None:
                 saw_delta = True
                 continue
             if event.method == "turn/completed":
-                completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+                completed_status = getattr(
+                    event.payload.turn.status, "value", str(event.payload.turn.status)
+                )
 
         if saw_delta:
             print()

@@ -40,7 +40,9 @@ with Codex(config=runtime_config()) as codex:
             saw_delta = True
             continue
         if event.method == "turn/completed":
-            completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+            completed_status = getattr(
+                event.payload.turn.status, "value", str(event.payload.turn.status)
+            )
 
     if saw_delta:
         print()

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
 if str(_EXAMPLES_ROOT) not in sys.path:
     sys.path.insert(0, str(_EXAMPLES_ROOT))
 
-from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
+from _bootstrap import (
+    assistant_text_from_turn,
+    ensure_local_sdk_src,
+    find_turn_by_id,
+    runtime_config,
+)
 
 ensure_local_sdk_src()
 

@@ -16,7 +21,9 @@ from openai_codex import AsyncCodex, TextInput
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        original = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        original = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
 
         first_turn = await original.turn(TextInput("Tell me one fact about Saturn."))
         _ = await first_turn.run()

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
 if str(_EXAMPLES_ROOT) not in sys.path:
     sys.path.insert(0, str(_EXAMPLES_ROOT))
 
-from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
+from _bootstrap import (
+    assistant_text_from_turn,
+    ensure_local_sdk_src,
+    find_turn_by_id,
+    runtime_config,
+)
 
 ensure_local_sdk_src()
 

@@ -16,8 +16,12 @@ from openai_codex import AsyncCodex, TextInput
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
-        first = await (await thread.turn(TextInput("One sentence about structured planning."))).run()
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
+        first = await (
+            await thread.turn(TextInput("One sentence about structured planning."))
+        ).run()
         second = await (await thread.turn(TextInput("Now restate it for a junior engineer."))).run()
 
         reopened = await codex.thread_resume(thread.id)

@@ -36,7 +40,9 @@ async def main() -> None:
                 model="gpt-5.4",
                 config={"model_reasoning_effort": "high"},
             )
-            resumed_result = await (await resumed.turn(TextInput("Continue in one short sentence."))).run()
+            resumed_result = await (
+                await resumed.turn(TextInput("Continue in one short sentence."))
+            ).run()
             resumed_info = f"{resumed_result.id} {resumed_result.status}"
         except Exception as exc:
             resumed_info = f"skipped({type(exc).__name__})"

@@ -44,7 +50,9 @@ async def main() -> None:
         forked_info = "n/a"
         try:
             forked = await codex.thread_fork(unarchived.id, model="gpt-5.4")
-            forked_result = await (await forked.turn(TextInput("Take a different angle in one short sentence."))).run()
+            forked_result = await (
+                await forked.turn(TextInput("Take a different angle in one short sentence."))
+            ).run()
             forked_info = f"{forked_result.id} {forked_result.status}"
         except Exception as exc:
             forked_info = f"skipped({type(exc).__name__})"

@@ -11,7 +11,6 @@ ensure_local_sdk_src()
 
 from openai_codex import Codex, TextInput
 
-
 with Codex(config=runtime_config()) as codex:
     thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
     first = thread.turn(TextInput("One sentence about structured planning.")).run()

@@ -41,7 +40,9 @@ with Codex(config=runtime_config()) as codex:
     forked_info = "n/a"
     try:
         forked = codex.thread_fork(unarchived.id, model="gpt-5.4")
-        forked_result = forked.turn(TextInput("Take a different angle in one short sentence.")).run()
+        forked_result = forked.turn(
+            TextInput("Take a different angle in one short sentence.")
+        ).run()
         forked_info = f"{forked_result.id} {forked_result.status}"
     except Exception as exc:
         forked_info = f"skipped({type(exc).__name__})"

@@ -23,7 +23,9 @@ REMOTE_IMAGE_URL = "https://raw.githubusercontent.com/github/explore/main/topics
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
         turn = await thread.turn(
             [
                 TextInput("What is in this image? Give 3 bullets."),

@@ -23,11 +23,15 @@ from openai_codex import AsyncCodex, LocalImageInput, TextInput
 async def main() -> None:
     with temporary_sample_image_path() as image_path:
         async with AsyncCodex(config=runtime_config()) as codex:
-            thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+            thread = await codex.thread_start(
+                model="gpt-5.4", config={"model_reasoning_effort": "high"}
+            )
 
             turn = await thread.turn(
                 [
-                    TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."),
+                    TextInput(
+                        "Read this generated local image and summarize the colors/layout in 2 bullets."
+                    ),
                     LocalImageInput(str(image_path.resolve())),
                 ]
             )

@@ -23,7 +23,9 @@ with temporary_sample_image_path() as image_path:
 
     result = thread.turn(
         [
-            TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."),
+            TextInput(
+                "Read this generated local image and summarize the colors/layout in 2 bullets."
+            ),
             LocalImageInput(str(image_path.resolve())),
         ]
     ).run()

@@ -60,7 +60,9 @@ async def retry_on_overload_async(
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
 
         try:
             result = await retry_on_overload_async(

@@ -45,7 +45,9 @@ async def main() -> None:
     print("Codex async mini CLI. Type /exit to quit.")
 
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
         print("Thread:", thread.id)
 
         while True:

@@ -49,7 +49,9 @@ PROMPT = (
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
 
         turn = await thread.turn(
             TextInput(PROMPT),

@@ -64,12 +66,16 @@ async def main() -> None:
         try:
             structured = json.loads(structured_text)
         except json.JSONDecodeError as exc:
-            raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc
+            raise RuntimeError(
+                f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
+            ) from exc
 
         summary = structured.get("summary")
         actions = structured.get("actions")
-        if not isinstance(summary, str) or not isinstance(actions, list) or not all(
-            isinstance(action, str) for action in actions
+        if (
+            not isinstance(summary, str)
+            or not isinstance(actions, list)
+            or not all(isinstance(action, str) for action in actions)
         ):
             raise RuntimeError(
                 f"Expected structured output with string summary/actions, got: {structured!r}"

@@ -60,14 +60,20 @@ with Codex(config=runtime_config()) as codex:
     try:
         structured = json.loads(structured_text)
     except json.JSONDecodeError as exc:
-        raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc
+        raise RuntimeError(
+            f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}"
+        ) from exc
 
     summary = structured.get("summary")
     actions = structured.get("actions")
-    if not isinstance(summary, str) or not isinstance(actions, list) or not all(
-        isinstance(action, str) for action in actions
+    if (
+        not isinstance(summary, str)
+        or not isinstance(actions, list)
+        or not all(isinstance(action, str) for action in actions)
     ):
-        raise RuntimeError(f"Expected structured output with string summary/actions, got: {structured!r}")
+        raise RuntimeError(
+            f"Expected structured output with string summary/actions, got: {structured!r}"
+        )
 
     print("Status:", result.status)
     print("summary:", summary)

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
 if str(_EXAMPLES_ROOT) not in sys.path:
     sys.path.insert(0, str(_EXAMPLES_ROOT))
 
-from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
+from _bootstrap import (
+    assistant_text_from_turn,
+    ensure_local_sdk_src,
+    find_turn_by_id,
+    runtime_config,
+)
 
 ensure_local_sdk_src()
 

@@ -35,7 +40,9 @@ PREFERRED_MODEL = "gpt-5.4"
 
 def _pick_highest_model(models):
     visible = [m for m in models if not m.hidden] or models
-    preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
+    preferred = next(
+        (m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None
+    )
     if preferred is not None:
         return preferred
     known_names = {m.id for m in visible} | {m.model for m in visible}

@@ -100,7 +107,9 @@ async def main() -> None:
         first_persisted_turn = find_turn_by_id(persisted.thread.turns, first.id)
 
         print("agent.message:", assistant_text_from_turn(first_persisted_turn))
-        print("items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or []))
+        print(
+            "items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or [])
+        )
 
         second_turn = await thread.turn(
             TextInput("Return JSON for a safe feature-flag rollout plan."),

@@ -117,7 +126,10 @@ async def main() -> None:
         second_persisted_turn = find_turn_by_id(persisted.thread.turns, second.id)
 
         print("agent.message.params:", assistant_text_from_turn(second_persisted_turn))
-        print("items.params:", 0 if second_persisted_turn is None else len(second_persisted_turn.items or []))
+        print(
+            "items.params:",
+            0 if second_persisted_turn is None else len(second_persisted_turn.items or []),
+        )
 
 
 if __name__ == "__main__":

@@ -5,7 +5,12 @@ _EXAMPLES_ROOT = Path(__file__).resolve().parents[1]
 if str(_EXAMPLES_ROOT) not in sys.path:
     sys.path.insert(0, str(_EXAMPLES_ROOT))
 
-from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config
+from _bootstrap import (
+    assistant_text_from_turn,
+    ensure_local_sdk_src,
+    find_turn_by_id,
+    runtime_config,
+)
 
 ensure_local_sdk_src()
 

@@ -33,7 +38,9 @@ PREFERRED_MODEL = "gpt-5.4"
 
 def _pick_highest_model(models):
     visible = [m for m in models if not m.hidden] or models
-    preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None)
+    preferred = next(
+        (m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None
+    )
     if preferred is not None:
         return preferred
     known_names = {m.id for m in visible} | {m.model for m in visible}

@@ -20,8 +20,12 @@ from openai_codex import AsyncCodex, TextInput
 
 async def main() -> None:
     async with AsyncCodex(config=runtime_config()) as codex:
-        thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
-        steer_turn = await thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence."))
+        thread = await codex.thread_start(
+            model="gpt-5.4", config={"model_reasoning_effort": "high"}
+        )
+        steer_turn = await thread.turn(
+            TextInput("Count from 1 to 40 with commas, then one summary sentence.")
+        )
         steer_result = "sent"
         try:
             _ = await steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers."))

@@ -35,11 +39,17 @@ async def main() -> None:
             steer_event_count += 1
             if event.method == "turn/completed":
                 steer_completed_turn = event.payload.turn
-                steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+                steer_completed_status = getattr(
+                    event.payload.turn.status, "value", str(event.payload.turn.status)
+                )
 
-        steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
+        steer_preview = (
+            assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
+        )
 
-        interrupt_turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence."))
+        interrupt_turn = await thread.turn(
+            TextInput("Count from 1 to 200 with commas, then one summary sentence.")
+        )
         interrupt_result = "sent"
         try:
             _ = await interrupt_turn.interrupt()

@@ -53,9 +63,13 @@ async def main() -> None:
             interrupt_event_count += 1
             if event.method == "turn/completed":
                 interrupt_completed_turn = event.payload.turn
-                interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+                interrupt_completed_status = getattr(
+                    event.payload.turn.status, "value", str(event.payload.turn.status)
+                )
 
-        interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
+        interrupt_preview = (
+            assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
+        )
 
         print("steer.result:", steer_result)
         print("steer.final.status:", steer_completed_status)

@@ -17,7 +17,9 @@ from openai_codex import Codex, TextInput
 
 with Codex(config=runtime_config()) as codex:
     thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"})
-    steer_turn = thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence."))
+    steer_turn = thread.turn(
+        TextInput("Count from 1 to 40 with commas, then one summary sentence.")
+    )
     steer_result = "sent"
     try:
         _ = steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers."))

@@ -31,11 +33,15 @@ with Codex(config=runtime_config()) as codex:
         steer_event_count += 1
         if event.method == "turn/completed":
             steer_completed_turn = event.payload.turn
-            steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+            steer_completed_status = getattr(
+                event.payload.turn.status, "value", str(event.payload.turn.status)
+            )
 
     steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]"
 
-    interrupt_turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence."))
+    interrupt_turn = thread.turn(
+        TextInput("Count from 1 to 200 with commas, then one summary sentence.")
+    )
    interrupt_result = "sent"
     try:
         _ = interrupt_turn.interrupt()

@@ -49,9 +55,13 @@ with Codex(config=runtime_config()) as codex:
         interrupt_event_count += 1
         if event.method == "turn/completed":
             interrupt_completed_turn = event.payload.turn
-            interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status))
+            interrupt_completed_status = getattr(
+                event.payload.turn.status, "value", str(event.payload.turn.status)
+            )
 
-    interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
+    interrupt_preview = (
+        assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]"
+    )
 
     print("steer.result:", steer_result)
     print("steer.final.status:", steer_completed_status)

@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import contextlib
-import importlib.util
 import os
 import sys
 import tempfile
 import zlib

@@ -107,11 +106,15 @@ def temporary_sample_image_path() -> Iterator[Path]:
 def server_label(metadata: object) -> str:
     server = getattr(metadata, "serverInfo", None)
     server_name = ((getattr(server, "name", None) or "") if server is not None else "").strip()
-    server_version = ((getattr(server, "version", None) or "") if server is not None else "").strip()
+    server_version = (
+        (getattr(server, "version", None) or "") if server is not None else ""
+    ).strip()
     if server_name and server_version:
         return f"{server_name} {server_version}"
 
-    user_agent = ((getattr(metadata, "userAgent", None) or "") if metadata is not None else "").strip()
+    user_agent = (
+        (getattr(metadata, "userAgent", None) or "") if metadata is not None else ""
+    ).strip()
     return user_agent or "unknown"