codex: revert Python SDK changes from PR #16274

This commit is contained in:
Eric Traut
2026-03-31 11:03:50 -06:00
parent 508c91c2c2
commit d6143dc64c
6 changed files with 31 additions and 205 deletions

View File

@@ -50,8 +50,6 @@ Properties/methods:
- `thread_unarchive(thread_id: str) -> Thread`
- `models(*, include_hidden: bool = False) -> ModelListResponse`
`personality` accepts either a raw string id or the built-in `Personality` enum values (`Personality.FRIENDLY`, `Personality.PRAGMATIC`, `Personality.NONE`).
Context manager:
```python

View File

@@ -452,22 +452,9 @@ def generate_v2_all() -> None:
],
cwd=sdk_root(),
)
_widen_generated_personality_type(out_path)
_normalize_generated_timestamps(out_path)
def _widen_generated_personality_type(out_path: Path) -> None:
source = out_path.read_text()
updated = re.sub(
r"class Personality\(Enum\):\n(?: .+\n)+",
"Personality = str\n\n",
source,
count=1,
)
if updated != source:
out_path.write_text(updated)
def _notification_specs() -> list[tuple[str, str]]:
server_notifications = json.loads(
(schema_root_dir() / "ServerNotification.json").read_text()
@@ -557,11 +544,6 @@ FIELD_ANNOTATION_OVERRIDES: dict[str, str] = {
# Keep public API typed without falling back to `Any`.
"config": "JsonObject",
"output_schema": "JsonObject",
"personality": "PersonalityLike",
}
FIELD_SERIALIZATION_OVERRIDES: dict[str, str] = {
"personality": "personality_value(personality)",
}
@@ -654,11 +636,7 @@ def _kw_signature_lines(fields: list[PublicFieldSpec]) -> list[str]:
def _model_arg_lines(
fields: list[PublicFieldSpec], *, indent: str = " "
) -> list[str]:
lines = []
for field in fields:
value = FIELD_SERIALIZATION_OVERRIDES.get(field.wire_name, field.py_name)
lines.append(f"{indent}{field.wire_name}={value},")
return lines
return [f"{indent}{field.wire_name}={field.py_name}," for field in fields]
def _replace_generated_block(source: str, block_name: str, body: str) -> str:

View File

@@ -16,6 +16,7 @@ from .errors import (
)
from .generated.v2_all import (
AskForApproval,
Personality,
PlanType,
ReasoningEffort,
ReasoningSummary,
@@ -35,7 +36,6 @@ from .generated.v2_all import (
TurnStatus,
TurnSteerParams,
)
from .personality import Personality
from .models import InitializeResponse
from .api import (
AsyncCodex,

View File

@@ -10,6 +10,7 @@ from .generated.v2_all import (
ApprovalsReviewer,
AskForApproval,
ModelListResponse,
Personality,
ReasoningEffort,
ReasoningSummary,
SandboxMode,
@@ -33,9 +34,6 @@ from .generated.v2_all import (
TurnSteerResponse,
)
from .models import InitializeResponse, JsonObject, Notification, ServerInfo
from .personality import Personality
from .personality import PersonalityLike
from .personality import personality_value
from ._inputs import (
ImageInput,
Input,
@@ -144,7 +142,7 @@ class Codex:
ephemeral: bool | None = None,
model: str | None = None,
model_provider: str | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox: SandboxMode | None = None,
service_name: str | None = None,
service_tier: ServiceTier | None = None,
@@ -159,7 +157,7 @@ class Codex:
ephemeral=ephemeral,
model=model,
model_provider=model_provider,
personality=personality_value(personality),
personality=personality,
sandbox=sandbox,
service_name=service_name,
service_tier=service_tier,
@@ -203,7 +201,7 @@ class Codex:
developer_instructions: str | None = None,
model: str | None = None,
model_provider: str | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox: SandboxMode | None = None,
service_tier: ServiceTier | None = None,
) -> Thread:
@@ -217,7 +215,7 @@ class Codex:
developer_instructions=developer_instructions,
model=model,
model_provider=model_provider,
personality=personality_value(personality),
personality=personality,
sandbox=sandbox,
service_tier=service_tier,
)
@@ -334,7 +332,7 @@ class AsyncCodex:
ephemeral: bool | None = None,
model: str | None = None,
model_provider: str | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox: SandboxMode | None = None,
service_name: str | None = None,
service_tier: ServiceTier | None = None,
@@ -350,7 +348,7 @@ class AsyncCodex:
ephemeral=ephemeral,
model=model,
model_provider=model_provider,
personality=personality_value(personality),
personality=personality,
sandbox=sandbox,
service_name=service_name,
service_tier=service_tier,
@@ -395,7 +393,7 @@ class AsyncCodex:
developer_instructions: str | None = None,
model: str | None = None,
model_provider: str | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox: SandboxMode | None = None,
service_tier: ServiceTier | None = None,
) -> AsyncThread:
@@ -410,7 +408,7 @@ class AsyncCodex:
developer_instructions=developer_instructions,
model=model,
model_provider=model_provider,
personality=personality_value(personality),
personality=personality,
sandbox=sandbox,
service_tier=service_tier,
)
@@ -481,7 +479,7 @@ class Thread:
effort: ReasoningEffort | None = None,
model: str | None = None,
output_schema: JsonObject | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox_policy: SandboxPolicy | None = None,
service_tier: ServiceTier | None = None,
summary: ReasoningSummary | None = None,
@@ -494,7 +492,7 @@ class Thread:
effort=effort,
model=model,
output_schema=output_schema,
personality=personality_value(personality),
personality=personality,
sandbox_policy=sandbox_policy,
service_tier=service_tier,
summary=summary,
@@ -516,7 +514,7 @@ class Thread:
effort: ReasoningEffort | None = None,
model: str | None = None,
output_schema: JsonObject | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox_policy: SandboxPolicy | None = None,
service_tier: ServiceTier | None = None,
summary: ReasoningSummary | None = None,
@@ -531,7 +529,7 @@ class Thread:
effort=effort,
model=model,
output_schema=output_schema,
personality=personality_value(personality),
personality=personality,
sandbox_policy=sandbox_policy,
service_tier=service_tier,
summary=summary,
@@ -565,7 +563,7 @@ class AsyncThread:
effort: ReasoningEffort | None = None,
model: str | None = None,
output_schema: JsonObject | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox_policy: SandboxPolicy | None = None,
service_tier: ServiceTier | None = None,
summary: ReasoningSummary | None = None,
@@ -578,7 +576,7 @@ class AsyncThread:
effort=effort,
model=model,
output_schema=output_schema,
personality=personality_value(personality),
personality=personality,
sandbox_policy=sandbox_policy,
service_tier=service_tier,
summary=summary,
@@ -600,7 +598,7 @@ class AsyncThread:
effort: ReasoningEffort | None = None,
model: str | None = None,
output_schema: JsonObject | None = None,
personality: PersonalityLike | None = None,
personality: Personality | None = None,
sandbox_policy: SandboxPolicy | None = None,
service_tier: ServiceTier | None = None,
summary: ReasoningSummary | None = None,
@@ -616,7 +614,7 @@ class AsyncThread:
effort=effort,
model=model,
output_schema=output_schema,
personality=personality_value(personality),
personality=personality,
sandbox_policy=sandbox_policy,
service_tier=service_tier,
summary=summary,

View File

@@ -1358,16 +1358,6 @@ class ChatgptLoginAccountParams(BaseModel):
]
class ChatgptDeviceCodeLoginAccountParams(BaseModel):
    """Login request variant selecting the ChatGPT device-code flow.

    Carries only its discriminator tag; it is one arm of the
    ``LoginAccountParams`` union.
    """

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    # Literal tag that discriminates this variant within the union.
    type: Annotated[
        Literal["chatgptDeviceCode"],
        Field(title="ChatgptDeviceCodev2::LoginAccountParamsType"),
    ]
class ChatgptAuthTokensLoginAccountParams(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -1403,7 +1393,6 @@ class LoginAccountParams(
RootModel[
ApiKeyLoginAccountParams
| ChatgptLoginAccountParams
| ChatgptDeviceCodeLoginAccountParams
| ChatgptAuthTokensLoginAccountParams
]
):
@@ -1413,7 +1402,6 @@ class LoginAccountParams(
root: Annotated[
ApiKeyLoginAccountParams
| ChatgptLoginAccountParams
| ChatgptDeviceCodeLoginAccountParams
| ChatgptAuthTokensLoginAccountParams,
Field(title="LoginAccountParams"),
]
@@ -1445,31 +1433,6 @@ class ChatgptLoginAccountResponse(BaseModel):
]
class ChatgptDeviceCodeLoginAccountResponse(BaseModel):
    """Response variant for the ChatGPT device-code login flow.

    Gives the client the URL to open and the one-time code the user must
    enter there to complete authorization.
    """

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    # Identifier for this login attempt — presumably used to poll its
    # status; confirm against the server protocol docs.
    login_id: Annotated[str, Field(alias="loginId")]
    # Literal tag that discriminates this variant within LoginAccountResponse.
    type: Annotated[
        Literal["chatgptDeviceCode"],
        Field(title="ChatgptDeviceCodev2::LoginAccountResponseType"),
    ]
    user_code: Annotated[
        str,
        Field(
            alias="userCode",
            description="One-time code the user must enter after signing in.",
        ),
    ]
    verification_url: Annotated[
        str,
        Field(
            alias="verificationUrl",
            description="URL the client should open in a browser to complete device code authorization.",
        ),
    ]
class ChatgptAuthTokensLoginAccountResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -1484,7 +1447,6 @@ class LoginAccountResponse(
RootModel[
ApiKeyLoginAccountResponse
| ChatgptLoginAccountResponse
| ChatgptDeviceCodeLoginAccountResponse
| ChatgptAuthTokensLoginAccountResponse
]
):
@@ -1494,7 +1456,6 @@ class LoginAccountResponse(
root: Annotated[
ApiKeyLoginAccountResponse
| ChatgptLoginAccountResponse
| ChatgptDeviceCodeLoginAccountResponse
| ChatgptAuthTokensLoginAccountResponse,
Field(title="LoginAccountResponse"),
]
@@ -1697,74 +1658,28 @@ class NetworkAccess(Enum):
enabled = "enabled"
class NetworkDomainPermission(Enum):
    """Access decision for a network domain: allowed or denied."""

    allow = "allow"
    deny = "deny"
class NetworkRequirements(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
)
allow_local_binding: Annotated[bool | None, Field(alias="allowLocalBinding")] = None
allow_unix_sockets: Annotated[
list[str] | None,
Field(
alias="allowUnixSockets",
description="Legacy compatibility view derived from `unix_sockets`.",
),
] = None
allow_unix_sockets: Annotated[list[str] | None, Field(alias="allowUnixSockets")] = (
None
)
allow_upstream_proxy: Annotated[bool | None, Field(alias="allowUpstreamProxy")] = (
None
)
allowed_domains: Annotated[
list[str] | None,
Field(
alias="allowedDomains",
description="Legacy compatibility view derived from `domains`.",
),
] = None
allowed_domains: Annotated[list[str] | None, Field(alias="allowedDomains")] = None
dangerously_allow_all_unix_sockets: Annotated[
bool | None, Field(alias="dangerouslyAllowAllUnixSockets")
] = None
dangerously_allow_non_loopback_proxy: Annotated[
bool | None, Field(alias="dangerouslyAllowNonLoopbackProxy")
] = None
denied_domains: Annotated[
list[str] | None,
Field(
alias="deniedDomains",
description="Legacy compatibility view derived from `domains`.",
),
] = None
domains: Annotated[
dict[str, Any] | None,
Field(
description="Canonical network permission map for `experimental_network`."
),
] = None
denied_domains: Annotated[list[str] | None, Field(alias="deniedDomains")] = None
enabled: bool | None = None
http_port: Annotated[int | None, Field(alias="httpPort", ge=0)] = None
managed_allowed_domains_only: Annotated[
bool | None,
Field(
alias="managedAllowedDomainsOnly",
description="When true, only managed allowlist entries are respected while managed network enforcement is active.",
),
] = None
socks_port: Annotated[int | None, Field(alias="socksPort", ge=0)] = None
unix_sockets: Annotated[
dict[str, Any] | None,
Field(
alias="unixSockets",
description="Canonical unix socket permission map for `experimental_network`.",
),
] = None
class NetworkUnixSocketPermission(Enum):
    """Access decision for a unix socket (note: ``none`` rather than ``deny``)."""

    allow = "allow"
    none = "none"
class NonSteerableTurnKind(Enum):
@@ -1810,17 +1725,10 @@ class PatchChangeKind(
root: AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind
class PersonalitiesListParams(BaseModel):
    """Parameters for the ``personalities/list`` request."""

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    # Optional working directories — presumably used to resolve repo-scoped
    # personalities per cwd; confirm against the server schema.
    cwds: list[AbsolutePathBuf] | None = None
class PersonalityScope(Enum):
    """Origin of a personality definition: built-in, user-level, or repo-level."""

    builtin = "builtin"
    user = "user"
    repo = "repo"
class Personality(Enum):
    """Built-in personality identifiers understood by the API."""

    none = "none"
    friendly = "friendly"
    pragmatic = "pragmatic"
class PlanDeltaNotification(BaseModel):
@@ -3357,7 +3265,7 @@ class ThreadResumeParams(BaseModel):
Field(description="Configuration overrides for the resumed thread, if any."),
] = None
model_provider: Annotated[str | None, Field(alias="modelProvider")] = None
personality: str | None = None
personality: Personality | None = None
sandbox: SandboxMode | None = None
service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None
thread_id: Annotated[str, Field(alias="threadId")]
@@ -3454,7 +3362,7 @@ class ThreadStartParams(BaseModel):
ephemeral: bool | None = None
model: str | None = None
model_provider: Annotated[str | None, Field(alias="modelProvider")] = None
personality: str | None = None
personality: Personality | None = None
sandbox: SandboxMode | None = None
service_name: Annotated[str | None, Field(alias="serviceName")] = None
service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None
@@ -4035,17 +3943,6 @@ class SkillsListRequest(BaseModel):
params: SkillsListParams
class PersonalitiesListRequest(BaseModel):
    """Request envelope for the ``personalities/list`` method."""

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    # Request id; RequestId is declared elsewhere in the generated module.
    id: RequestId
    # Fixed method name discriminating this request type.
    method: Annotated[
        Literal["personalities/list"], Field(title="Personalities/listRequestMethod")
    ]
    params: PersonalitiesListParams
class PluginListRequest(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -4976,16 +4873,6 @@ class OverriddenMetadata(BaseModel):
overriding_layer: Annotated[ConfigLayerMetadata, Field(alias="overridingLayer")]
class PersonalityMetadata(BaseModel):
    """Metadata describing a single available personality."""

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    description: str
    is_built_in: Annotated[bool, Field(alias="isBuiltIn")]
    name: str
    # Where the personality is defined: builtin, user, or repo.
    scope: PersonalityScope
class PluginDetail(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -5664,7 +5551,7 @@ class TurnStartParams(BaseModel):
),
] = None
personality: Annotated[
str | None,
Personality | None,
Field(
description="Override the personality for this turn and subsequent turns."
),
@@ -5974,21 +5861,6 @@ class ListMcpServerStatusResponse(BaseModel):
] = None
class PersonalitiesListEntry(BaseModel):
    """Personalities associated with one working directory."""

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    cwd: str
    personalities: list[PersonalityMetadata]
class PersonalitiesListResponse(BaseModel):
    """Response payload for ``personalities/list``: a list of per-cwd entries."""

    # Allow population by either the field name or its alias.
    model_config = ConfigDict(
        populate_by_name=True,
    )
    data: list[PersonalitiesListEntry]
class PluginListResponse(BaseModel):
model_config = ConfigDict(
populate_by_name=True,
@@ -6280,7 +6152,6 @@ class ClientRequest(
| ThreadLoadedListRequest
| ThreadReadRequest
| SkillsListRequest
| PersonalitiesListRequest
| PluginListRequest
| PluginReadRequest
| AppListRequest
@@ -6346,7 +6217,6 @@ class ClientRequest(
| ThreadLoadedListRequest
| ThreadReadRequest
| SkillsListRequest
| PersonalitiesListRequest
| PluginListRequest
| PluginReadRequest
| AppListRequest

View File

@@ -1,18 +0,0 @@
from __future__ import annotations
from enum import Enum
class Personality(str, Enum):
NONE = "none"
FRIENDLY = "friendly"
PRAGMATIC = "pragmatic"
PersonalityLike = str | Personality
def personality_value(personality: PersonalityLike | None) -> str | None:
if isinstance(personality, Personality):
return personality.value
return personality