Currently the config returned by `config/read` is untyped. Add types so it's easier for clients to parse the config. Since the configs are currently all defined in snake case, we'll keep that instead of switching to camel case like the rest of V2.
Sample output from the app server test client:
```
{
  "id": "f28449f4-b015-459b-b07b-eef06980165d",
  "result": {
    "config": {
      "approvalPolicy": null,
      "compactPrompt": null,
      "developerInstructions": null,
      "features": {
        "experimental_use_rmcp_client": true
      },
      "forcedChatgptWorkspaceId": null,
      "forcedLoginMethod": null,
      "instructions": null,
      "model": "gpt-5.1-codex-max",
      "modelAutoCompactTokenLimit": null,
      "modelContextWindow": null,
      "modelProvider": null,
      "modelReasoningEffort": null,
      "modelReasoningSummary": null,
      "modelVerbosity": null,
      "model_providers": {
        "local": {
          "base_url": "http://localhost:8061/api/codex",
          "env_http_headers": {
            "ChatGPT-Account-ID": "OPENAI_ACCOUNT_ID"
          },
          "env_key": "CHATGPT_TOKEN_STAGING",
          "name": "local",
          "wire_api": "responses"
        }
      },
      "model_reasoning_effort": "medium",
      "notice": {
        "hide_gpt-5.1-codex-max_migration_prompt": true,
        "hide_gpt5_1_migration_prompt": true
      },
      "profile": null,
      "profiles": {},
      "projects": {
        "/Users/celia/code": {
          "trust_level": "trusted"
        },
        "/Users/celia/code/codex": {
          "trust_level": "trusted"
        },
        "/Users/celia/code/openai": {
          "trust_level": "trusted"
        }
      },
      "reviewModel": null,
      "sandboxMode": null,
      "sandboxWorkspaceWrite": null,
      "tools": {
        "viewImage": null,
        "webSearch": null
      }
    },
    "origins": {
      "features.experimental_use_rmcp_client": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_providers.local.base_url": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_providers.local.env_http_headers.ChatGPT-Account-ID": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_providers.local.env_key": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_providers.local.name": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_providers.local.wire_api": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "model_reasoning_effort": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "notice.hide_gpt-5.1-codex-max_migration_prompt": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "notice.hide_gpt5_1_migration_prompt": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "projects./Users/celia/code.trust_level": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "projects./Users/celia/code/codex.trust_level": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "projects./Users/celia/code/openai.trust_level": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      },
      "tools.web_search": {
        "name": "user",
        "source": "/Users/celia/.codex/config.toml",
        "version": "sha256:a1d8eaedb5d9db5dfdfa69f30fa9df2efec66bb4dd46aa67f149fcc67cd0711c"
      }
    }
  }
}
```
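With a typed shape like the one above, a client can deserialize the fields it cares about directly instead of walking untyped JSON. A minimal sketch, assuming serde/serde_json on the client side; the struct and field names here are hypothetical and only mirror the sample output (the real types are the ones exported from `codex_app_server_protocol`):

```rust
use std::collections::HashMap;

use serde::Deserialize;

// Hypothetical client-side view of the `result` object of a `config/read`
// response; it only names the fields this example cares about and ignores
// the rest (serde skips unknown keys by default).
#[derive(Debug, Deserialize)]
struct ConfigReadResult {
    config: EffectiveConfig,
    origins: HashMap<String, Origin>,
}

#[derive(Debug, Deserialize)]
struct EffectiveConfig {
    model: Option<String>,
    model_reasoning_effort: Option<String>,
    // ...remaining keys omitted for brevity.
}

#[derive(Debug, Deserialize)]
struct Origin {
    name: String,
    source: String,
    version: String,
}

fn parse_config_read(result_json: &str) -> serde_json::Result<ConfigReadResult> {
    serde_json::from_str(result_json)
}
```

The integration tests below exercise the typed read and write paths end to end through the app server test client.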
use anyhow::Result;
use app_test_support::McpProcess;
use app_test_support::to_response;
use codex_app_server_protocol::AskForApproval;
use codex_app_server_protocol::ConfigBatchWriteParams;
use codex_app_server_protocol::ConfigEdit;
use codex_app_server_protocol::ConfigLayerName;
use codex_app_server_protocol::ConfigReadParams;
use codex_app_server_protocol::ConfigReadResponse;
use codex_app_server_protocol::ConfigValueWriteParams;
use codex_app_server_protocol::ConfigWriteResponse;
use codex_app_server_protocol::JSONRPCError;
use codex_app_server_protocol::JSONRPCResponse;
use codex_app_server_protocol::MergeStrategy;
use codex_app_server_protocol::RequestId;
use codex_app_server_protocol::SandboxMode;
use codex_app_server_protocol::ToolsV2;
use codex_app_server_protocol::WriteStatus;
use pretty_assertions::assert_eq;
use serde_json::json;
use std::path::PathBuf;
use tempfile::TempDir;
use tokio::time::timeout;

const DEFAULT_READ_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);

fn write_config(codex_home: &TempDir, contents: &str) -> Result<()> {
    Ok(std::fs::write(
        codex_home.path().join("config.toml"),
        contents,
    )?)
}
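
/// config/read returns the typed effective config plus per-key origins and the layer stack.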
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_returns_effective_and_layers() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"
sandbox_mode = "workspace-write"
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    assert_eq!(config.model.as_deref(), Some("gpt-user"));
    assert_eq!(
        origins.get("model").expect("origin").name,
        ConfigLayerName::User
    );
    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[1].name, ConfigLayerName::User);

    Ok(())
}
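
/// The typed config includes the [tools] table, with an origin recorded for each tool flag.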
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_tools() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"

[tools]
web_search = true
view_image = false
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    let tools = config.tools.expect("tools present");
    assert_eq!(
        tools,
        ToolsV2 {
            web_search: Some(true),
            view_image: Some(false),
        }
    );
    assert_eq!(
        origins.get("tools.web_search").expect("origin").name,
        ConfigLayerName::User
    );
    assert_eq!(
        origins.get("tools.view_image").expect("origin").name,
        ConfigLayerName::User
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 2);
    assert_eq!(layers[0].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[1].name, ConfigLayerName::User);

    Ok(())
}
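
/// Values from the managed (system) config override user values; origins and layers report which layer won per key.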
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_read_includes_system_layer_and_overrides() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-user"
approval_policy = "on-request"
sandbox_mode = "workspace-write"

[sandbox_workspace_write]
writable_roots = ["/user"]
network_access = true
"#,
    )?;

    let managed_path = codex_home.path().join("managed_config.toml");
    std::fs::write(
        &managed_path,
        r#"
model = "gpt-system"
approval_policy = "never"

[sandbox_workspace_write]
writable_roots = ["/system"]
"#,
    )?;

    let managed_path_str = managed_path.display().to_string();

    let mut mcp = McpProcess::new_with_env(
        codex_home.path(),
        &[("CODEX_MANAGED_CONFIG_PATH", Some(&managed_path_str))],
    )
    .await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let request_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: true,
        })
        .await?;
    let resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
    )
    .await??;
    let ConfigReadResponse {
        config,
        origins,
        layers,
    } = to_response(resp)?;

    assert_eq!(config.model.as_deref(), Some("gpt-system"));
    assert_eq!(
        origins.get("model").expect("origin").name,
        ConfigLayerName::System
    );

    assert_eq!(config.approval_policy, Some(AskForApproval::Never));
    assert_eq!(
        origins.get("approval_policy").expect("origin").name,
        ConfigLayerName::System
    );

    assert_eq!(config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
    assert_eq!(
        origins.get("sandbox_mode").expect("origin").name,
        ConfigLayerName::User
    );

    let sandbox = config
        .sandbox_workspace_write
        .as_ref()
        .expect("sandbox workspace write");
    assert_eq!(sandbox.writable_roots, vec![PathBuf::from("/system")]);
    assert_eq!(
        origins
            .get("sandbox_workspace_write.writable_roots.0")
            .expect("origin")
            .name,
        ConfigLayerName::System
    );

    assert!(sandbox.network_access);
    assert_eq!(
        origins
            .get("sandbox_workspace_write.network_access")
            .expect("origin")
            .name,
        ConfigLayerName::User
    );

    let layers = layers.expect("layers present");
    assert_eq!(layers.len(), 3);
    assert_eq!(layers[0].name, ConfigLayerName::System);
    assert_eq!(layers[1].name, ConfigLayerName::SessionFlags);
    assert_eq!(layers[2].name, ConfigLayerName::User);

    Ok(())
}
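
/// Writing a single value with the current expected version replaces it in config.toml, and the change shows up on the next read.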
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_value_write_replaces_value() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-old"
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let read_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: false,
        })
        .await?;
    let read_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
    )
    .await??;
    let read: ConfigReadResponse = to_response(read_resp)?;
    let expected_version = read.origins.get("model").map(|m| m.version.clone());

    let write_id = mcp
        .send_config_value_write_request(ConfigValueWriteParams {
            file_path: None,
            key_path: "model".to_string(),
            value: json!("gpt-new"),
            merge_strategy: MergeStrategy::Replace,
            expected_version,
        })
        .await?;
    let write_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(write_id)),
    )
    .await??;
    let write: ConfigWriteResponse = to_response(write_resp)?;
    let expected_file_path = codex_home
        .path()
        .join("config.toml")
        .canonicalize()
        .unwrap()
        .display()
        .to_string();

    assert_eq!(write.status, WriteStatus::Ok);
    assert_eq!(write.file_path, expected_file_path);
    assert!(write.overridden_metadata.is_none());

    let verify_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: false,
        })
        .await?;
    let verify_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(verify_id)),
    )
    .await??;
    let verify: ConfigReadResponse = to_response(verify_resp)?;
    assert_eq!(verify.config.model.as_deref(), Some("gpt-new"));

    Ok(())
}
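
/// A stale expected_version is rejected with a configVersionConflict error.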
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_value_write_rejects_version_conflict() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(
        &codex_home,
        r#"
model = "gpt-old"
"#,
    )?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let write_id = mcp
        .send_config_value_write_request(ConfigValueWriteParams {
            file_path: Some(codex_home.path().join("config.toml").display().to_string()),
            key_path: "model".to_string(),
            value: json!("gpt-new"),
            merge_strategy: MergeStrategy::Replace,
            expected_version: Some("sha256:stale".to_string()),
        })
        .await?;

    let err: JSONRPCError = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_error_message(RequestId::Integer(write_id)),
    )
    .await??;
    let code = err
        .error
        .data
        .as_ref()
        .and_then(|d| d.get("config_write_error_code"))
        .and_then(|v| v.as_str());
    assert_eq!(code, Some("configVersionConflict"));

    Ok(())
}
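
/// A batch write applies several edits in one request; a follow-up read reflects all of them.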
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_batch_write_applies_multiple_edits() -> Result<()> {
    let codex_home = TempDir::new()?;
    write_config(&codex_home, "")?;

    let mut mcp = McpProcess::new(codex_home.path()).await?;
    timeout(DEFAULT_READ_TIMEOUT, mcp.initialize()).await??;

    let batch_id = mcp
        .send_config_batch_write_request(ConfigBatchWriteParams {
            file_path: Some(codex_home.path().join("config.toml").display().to_string()),
            edits: vec![
                ConfigEdit {
                    key_path: "sandbox_mode".to_string(),
                    value: json!("workspace-write"),
                    merge_strategy: MergeStrategy::Replace,
                },
                ConfigEdit {
                    key_path: "sandbox_workspace_write".to_string(),
                    value: json!({
                        "writable_roots": ["/tmp"],
                        "network_access": false
                    }),
                    merge_strategy: MergeStrategy::Replace,
                },
            ],
            expected_version: None,
        })
        .await?;
    let batch_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(batch_id)),
    )
    .await??;
    let batch_write: ConfigWriteResponse = to_response(batch_resp)?;
    assert_eq!(batch_write.status, WriteStatus::Ok);
    let expected_file_path = codex_home
        .path()
        .join("config.toml")
        .canonicalize()
        .unwrap()
        .display()
        .to_string();
    assert_eq!(batch_write.file_path, expected_file_path);

    let read_id = mcp
        .send_config_read_request(ConfigReadParams {
            include_layers: false,
        })
        .await?;
    let read_resp: JSONRPCResponse = timeout(
        DEFAULT_READ_TIMEOUT,
        mcp.read_stream_until_response_message(RequestId::Integer(read_id)),
    )
    .await??;
    let read: ConfigReadResponse = to_response(read_resp)?;
    assert_eq!(read.config.sandbox_mode, Some(SandboxMode::WorkspaceWrite));
    let sandbox = read
        .config
        .sandbox_workspace_write
        .as_ref()
        .expect("sandbox workspace write");
    assert_eq!(sandbox.writable_roots, vec![PathBuf::from("/tmp")]);
    assert!(!sandbox.network_access);

    Ok(())
}