Compare commits

...

22 Commits

Author SHA1 Message Date
Ahmed Ibrahim
fc242eee29 codex: persist availability NUX by model 2026-02-26 19:47:00 -08:00
Ahmed Ibrahim
3a753e0adf Merge branch 'codex/show-nux-new-model-info' into codex/show-model-new-tip 2026-02-26 19:23:23 -08:00
Ahmed Ibrahim
4b9ad08102 codex: fix CI failure on PR #12972 2026-02-26 19:23:12 -08:00
Ahmed Ibrahim
0667409ec6 Use availability NUX messages in tui 2026-02-26 19:20:05 -08:00
Ahmed Ibrahim
1b5e47a5d4 Merge branch 'codex/show-nux-new-model-info' into codex/show-model-new-tip 2026-02-26 19:08:24 -08:00
Ahmed Ibrahim
0a1346175b Remove availability NUX ids 2026-02-26 19:08:18 -08:00
Ahmed Ibrahim
e9a02f64ef Merge branch 'codex/show-nux-new-model-info' into codex/show-model-new-tip
# Conflicts:
#	codex-rs/tui/src/chatwidget/tests.rs
2026-02-26 18:58:53 -08:00
Ahmed Ibrahim
09c3663773 Merge remote-tracking branch 'origin/main' into codex/show-nux-new-model-info
# Conflicts:
#	codex-rs/app-server-protocol/schema/json/v2/ModelListResponse.json
#	codex-rs/app-server-protocol/schema/typescript/v2/Model.ts
#	codex-rs/app-server-protocol/src/protocol/v2.rs
#	codex-rs/app-server/README.md
#	codex-rs/app-server/src/models.rs
#	codex-rs/app-server/tests/suite/v2/model_list.rs
2026-02-26 18:58:12 -08:00
Ahmed Ibrahim
01dd66c91f Merge remote-tracking branch 'origin/main' into codex/show-model-new-tip
# Conflicts:
#	codex-rs/app-server-protocol/schema/json/v2/ModelListResponse.json
#	codex-rs/app-server-protocol/schema/typescript/v2/Model.ts
#	codex-rs/app-server-protocol/src/protocol/v2.rs
#	codex-rs/app-server/README.md
#	codex-rs/app-server/src/models.rs
#	codex-rs/app-server/tests/suite/v2/model_list.rs
2026-02-26 18:57:24 -08:00
Ahmed Ibrahim
a998de775f Resolve app-server model payload merge drift 2026-02-26 18:56:13 -08:00
Ahmed Ibrahim
c5fa78fa81 Resolve app-server model payload merge drift 2026-02-26 18:56:02 -08:00
Ahmed Ibrahim
0fd10af916 Show model availability NUX tips in tui 2026-02-26 18:48:36 -08:00
Ahmed Ibrahim
3cec1f2220 Add model availability NUX metadata 2026-02-26 18:48:19 -08:00
Ahmed Ibrahim
3b0251757a Add model availability NUX metadata 2026-02-26 18:48:05 -08:00
Ahmed Ibrahim
d47407a2e5 codex: fix show_nux test initializers 2026-02-26 17:56:32 -08:00
Ahmed Ibrahim
d538a6bb41 Rename show_nux_new to show_nux in tui 2026-02-26 17:45:17 -08:00
Ahmed Ibrahim
d204d68c91 Rename show_nux_new to show_nux 2026-02-26 17:43:08 -08:00
Ahmed Ibrahim
f16ad6b82f Rename show_nux_new to show_nux 2026-02-26 17:42:53 -08:00
Ahmed Ibrahim
4abd8e9b24 codex: tweak show_nux_new wording (#12972) 2026-02-26 17:40:02 -08:00
Ahmed Ibrahim
7111a580b1 codex: tweak show_nux_new wording (#12972) 2026-02-26 17:39:57 -08:00
Ahmed Ibrahim
1fbd3add7a Show model-new startup tips in tui 2026-02-26 17:37:37 -08:00
Ahmed Ibrahim
a95c2e1eb9 Add show_nux_new to model info 2026-02-26 17:37:30 -08:00
32 changed files with 952 additions and 100 deletions

View File

@@ -10225,6 +10225,16 @@
},
"Model": {
"properties": {
"availabilityNux": {
"anyOf": [
{
"$ref": "#/definitions/v2/ModelAvailabilityNux"
},
{
"type": "null"
}
]
},
"defaultReasoningEffort": {
"$ref": "#/definitions/v2/ReasoningEffort"
},
@@ -10295,6 +10305,17 @@
],
"type": "object"
},
"ModelAvailabilityNux": {
"properties": {
"message": {
"type": "string"
}
},
"required": [
"message"
],
"type": "object"
},
"ModelListParams": {
"$schema": "http://json-schema.org/draft-07/schema#",
"properties": {

View File

@@ -22,6 +22,16 @@
},
"Model": {
"properties": {
"availabilityNux": {
"anyOf": [
{
"$ref": "#/definitions/ModelAvailabilityNux"
},
{
"type": "null"
}
]
},
"defaultReasoningEffort": {
"$ref": "#/definitions/ReasoningEffort"
},
@@ -92,6 +102,17 @@
],
"type": "object"
},
"ModelAvailabilityNux": {
"properties": {
"message": {
"type": "string"
}
},
"required": [
"message"
],
"type": "object"
},
"ModelUpgradeInfo": {
"properties": {
"migrationMarkdown": {

View File

@@ -3,7 +3,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { InputModality } from "../InputModality";
import type { ReasoningEffort } from "../ReasoningEffort";
import type { ModelAvailabilityNux } from "./ModelAvailabilityNux";
import type { ModelUpgradeInfo } from "./ModelUpgradeInfo";
import type { ReasoningEffortOption } from "./ReasoningEffortOption";
export type Model = { id: string, model: string, upgrade: string | null, upgradeInfo: ModelUpgradeInfo | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };
export type Model = { id: string, model: string, upgrade: string | null, upgradeInfo: ModelUpgradeInfo | null, availabilityNux: ModelAvailabilityNux | null, displayName: string, description: string, hidden: boolean, supportedReasoningEfforts: Array<ReasoningEffortOption>, defaultReasoningEffort: ReasoningEffort, inputModalities: Array<InputModality>, supportsPersonality: boolean, isDefault: boolean, };

View File

@@ -0,0 +1,5 @@
// GENERATED CODE! DO NOT MODIFY BY HAND!
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type ModelAvailabilityNux = { message: string, };

View File

@@ -107,6 +107,7 @@ export type { McpToolCallResult } from "./McpToolCallResult";
export type { McpToolCallStatus } from "./McpToolCallStatus";
export type { MergeStrategy } from "./MergeStrategy";
export type { Model } from "./Model";
export type { ModelAvailabilityNux } from "./ModelAvailabilityNux";
export type { ModelListParams } from "./ModelListParams";
export type { ModelListResponse } from "./ModelListResponse";
export type { ModelRerouteReason } from "./ModelRerouteReason";

View File

@@ -31,6 +31,7 @@ use codex_protocol::models::MessagePhase;
use codex_protocol::models::PermissionProfile as CorePermissionProfile;
use codex_protocol::models::ResponseItem;
use codex_protocol::openai_models::InputModality;
use codex_protocol::openai_models::ModelAvailabilityNux as CoreModelAvailabilityNux;
use codex_protocol::openai_models::ReasoningEffort;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::parse_command::ParsedCommand as CoreParsedCommand;
@@ -1389,6 +1390,21 @@ pub struct ModelListParams {
pub include_hidden: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
pub struct ModelAvailabilityNux {
pub message: String,
}
impl From<CoreModelAvailabilityNux> for ModelAvailabilityNux {
fn from(value: CoreModelAvailabilityNux) -> Self {
Self {
message: value.message,
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export_to = "v2/")]
@@ -1397,6 +1413,7 @@ pub struct Model {
pub model: String,
pub upgrade: Option<String>,
pub upgrade_info: Option<ModelUpgradeInfo>,
pub availability_nux: Option<ModelAvailabilityNux>,
pub display_name: String,
pub description: String,
pub hidden: bool,

View File

@@ -142,7 +142,7 @@ Example with notification opt-out:
- `thread/realtime/stop` — stop the active realtime session for the thread (experimental); returns `{}`.
- `review/start` — kick off Codex's automated reviewer for a thread; responds like `turn/start` and emits `item/started`/`item/completed` notifications with `enteredReviewMode` and `exitedReviewMode` items, plus a final assistant `agentMessage` containing the review.
- `command/exec` — run a single command under the server sandbox without starting a thread/turn (handy for utilities and validation).
- `model/list` — list available models (set `includeHidden: true` to include entries with `hidden: true`), with reasoning effort options, optional legacy `upgrade` model ids, and optional `upgradeInfo` metadata (`model`, `upgradeCopy`, `modelLink`, `migrationMarkdown`).
- `model/list` — list available models (set `includeHidden: true` to include entries with `hidden: true`), with reasoning effort options, optional legacy `upgrade` model ids, optional `upgradeInfo` metadata (`model`, `upgradeCopy`, `modelLink`, `migrationMarkdown`), and optional `availabilityNux` metadata.
- `experimentalFeature/list` — list feature flags with stage metadata (`beta`, `underDevelopment`, `stable`, etc.), enabled/default-enabled state, and cursor pagination. For non-beta flags, `displayName`/`description`/`announcement` are `null`.
- `collaborationMode/list` — list available collaboration mode presets (experimental, no pagination). This response omits built-in developer instructions; clients should either pass `settings.developer_instructions: null` when setting a mode to use Codex's built-in instructions, or provide their own instructions explicitly.
- `skills/list` — list skills for one or more `cwd` values (optional `forceReload`).

View File

@@ -32,6 +32,7 @@ fn model_from_preset(preset: ModelPreset) -> Model {
model_link: upgrade.model_link.clone(),
migration_markdown: upgrade.migration_markdown.clone(),
}),
availability_nux: preset.availability_nux.map(Into::into),
display_name: preset.display_name.to_string(),
description: preset.description.to_string(),
hidden: !preset.show_in_picker,

View File

@@ -34,6 +34,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -31,6 +31,7 @@ fn model_from_preset(preset: &ModelPreset) -> Model {
model_link: upgrade.model_link.clone(),
migration_markdown: upgrade.migration_markdown.clone(),
}),
availability_nux: preset.availability_nux.clone().map(Into::into),
display_name: preset.display_name.clone(),
description: preset.description.clone(),
hidden: !preset.show_in_picker,
@@ -134,50 +135,6 @@ async fn list_models_includes_hidden_models() -> Result<()> {
Ok(())
}
#[tokio::test]
async fn list_models_returns_upgrade_info_metadata() -> Result<()> {
let codex_home = TempDir::new()?;
write_models_cache(codex_home.path())?;
let mut mcp = McpProcess::new(codex_home.path()).await?;
timeout(DEFAULT_TIMEOUT, mcp.initialize()).await??;
let request_id = mcp
.send_list_models_request(ModelListParams {
limit: Some(100),
cursor: None,
include_hidden: Some(true),
})
.await?;
let response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(request_id)),
)
.await??;
let ModelListResponse { data: items, .. } = to_response::<ModelListResponse>(response)?;
let item = items
.iter()
.find(|item| item.upgrade_info.is_some())
.expect("expected at least one model with upgrade info");
let upgrade_info = item
.upgrade_info
.as_ref()
.expect("expected upgrade info to be populated");
assert_eq!(item.upgrade.as_ref(), Some(&upgrade_info.model));
assert!(!upgrade_info.model.is_empty());
assert!(
upgrade_info.upgrade_copy.is_some()
|| upgrade_info.model_link.is_some()
|| upgrade_info.migration_markdown.is_some()
);
Ok(())
}
#[tokio::test]
async fn list_models_pagination_works() -> Result<()> {
let codex_home = TempDir::new()?;

View File

@@ -82,6 +82,7 @@ async fn models_client_hits_models_endpoint() {
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -809,6 +809,16 @@
"Notice": {
"description": "Settings for notices we display to users via the tui and app-server clients (primarily the Codex IDE extension). NOTE: these are different from notifications - notices are warnings, NUX screens, acknowledgements, etc.",
"properties": {
"availability_nux_display_counts": {
"additionalProperties": {
"format": "uint32",
"minimum": 0.0,
"type": "integer"
},
"default": {},
"description": "Tracks how many times the TUI actually displayed each availability NUX.",
"type": "object"
},
"hide_full_access_warning": {
"description": "Tracks whether the user has acknowledged the full access warning prompt.",
"type": "boolean"

View File

@@ -39,6 +39,8 @@ pub enum ConfigEdit {
SetNoticeHideModelMigrationPrompt(String, bool),
/// Record that a migration prompt was shown for an old->new model mapping.
RecordModelMigrationSeen { from: String, to: String },
/// Record that an availability NUX was displayed for a model.
RecordAvailabilityNuxDisplay { model: String },
/// Replace the entire `[mcp_servers]` table.
ReplaceMcpServers(BTreeMap<String, McpServerConfig>),
/// Set or clear a skill config entry under `[[skills.config]]`.
@@ -331,6 +333,21 @@ impl ConfigDocument {
&[Notice::TABLE_KEY, "model_migrations", from.as_str()],
value(to.clone()),
)),
ConfigEdit::RecordAvailabilityNuxDisplay { model } => {
let resolved = self.scoped_segments(
Scope::Global,
&[Notice::TABLE_KEY, "availability_nux_display_counts"],
);
let Some(table) = self.descend(&resolved, TraversalMode::Create) else {
return Ok(false);
};
let current = table
.get(model.as_str())
.and_then(toml_edit::Item::as_integer)
.unwrap_or(0);
table[model.as_str()] = value(current + 1);
Ok(true)
}
ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged) => Ok(self.write_value(
Scope::Global,
&["windows_wsl_setup_acknowledged"],
@@ -788,6 +805,13 @@ impl ConfigEditsBuilder {
self
}
pub fn record_availability_nux_display(mut self, model: &str) -> Self {
self.edits.push(ConfigEdit::RecordAvailabilityNuxDisplay {
model: model.to_string(),
});
self
}
pub fn set_windows_wsl_setup_acknowledged(mut self, acknowledged: bool) -> Self {
self.edits
.push(ConfigEdit::SetWindowsWslSetupAcknowledged(acknowledged));
@@ -1434,6 +1458,40 @@ gpt-5 = "gpt-5.1"
assert_eq!(contents, expected);
}
#[test]
fn blocking_record_availability_nux_display_increments_count() {
let tmp = tempdir().expect("tmpdir");
let codex_home = tmp.path();
std::fs::write(
codex_home.join(CONFIG_TOML_FILE),
r#"[notice]
existing = "value"
[notice.availability_nux_display_counts]
"spark" = 1
"#,
)
.expect("seed");
apply_blocking(
codex_home,
None,
&[ConfigEdit::RecordAvailabilityNuxDisplay {
model: "spark".to_string(),
}],
)
.expect("persist");
let contents =
std::fs::read_to_string(codex_home.join(CONFIG_TOML_FILE)).expect("read config");
let expected = r#"[notice]
existing = "value"
[notice.availability_nux_display_counts]
"spark" = 2
"#;
assert_eq!(contents, expected);
}
#[test]
fn blocking_replace_mcp_servers_round_trips() {
let tmp = tempdir().expect("tmpdir");

View File

@@ -722,6 +722,9 @@ pub struct Notice {
/// Tracks acknowledged model migrations as old->new model slug mappings.
#[serde(default)]
pub model_migrations: BTreeMap<String, String>,
/// Tracks how many times the TUI actually displayed each availability NUX.
#[serde(default)]
pub availability_nux_display_counts: BTreeMap<String, u32>,
}
impl Notice {

View File

@@ -69,6 +69,7 @@ pub(crate) fn model_info_from_slug(slug: &str) -> ModelInfo {
visibility: ModelVisibility::None,
supported_in_api: true,
priority: 99,
availability_nux: None,
upgrade: None,
base_instructions: BASE_INSTRUCTIONS.to_string(),
model_messages: local_personality_messages_for_slug(slug),

View File

@@ -237,6 +237,7 @@ async fn model_change_from_image_to_text_strips_prior_image_content() -> Result<
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
@@ -395,6 +396,7 @@ async fn model_switch_to_smaller_model_updates_token_context_window() -> Result<
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -339,6 +339,7 @@ fn test_remote_model(slug: &str, priority: i32) -> ModelInfo {
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -602,6 +602,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
@@ -710,6 +711,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -297,6 +297,7 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> {
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
@@ -534,6 +535,7 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> {
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,
@@ -995,6 +997,7 @@ fn test_remote_model_with_policy(
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy,
supports_parallel_tool_calls: false,

View File

@@ -401,6 +401,7 @@ async fn stdio_image_responses_are_sanitized_for_text_only_model() -> anyhow::Re
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -664,6 +664,7 @@ async fn view_image_tool_returns_unsupported_message_for_text_only_model() -> an
default_reasoning_summary: ReasoningSummary::Auto,
support_verbosity: false,
default_verbosity: None,
availability_nux: None,
apply_patch_tool_type: None,
truncation_policy: TruncationPolicyConfig::bytes(10_000),
supports_parallel_tool_calls: false,

View File

@@ -99,6 +99,11 @@ pub struct ModelUpgrade {
pub migration_markdown: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq, Eq)]
pub struct ModelAvailabilityNux {
pub message: String,
}
/// Metadata describing a Codex-supported model.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq)]
pub struct ModelPreset {
@@ -123,6 +128,8 @@ pub struct ModelPreset {
pub upgrade: Option<ModelUpgrade>,
/// Whether this preset should appear in the picker UI.
pub show_in_picker: bool,
/// Availability NUX shown when this preset becomes accessible to the user.
pub availability_nux: Option<ModelAvailabilityNux>,
/// whether this model is supported in the api
pub supported_in_api: bool,
/// Input modalities accepted when composing user turns for this preset.
@@ -225,6 +232,7 @@ pub struct ModelInfo {
pub visibility: ModelVisibility,
pub supported_in_api: bool,
pub priority: i32,
pub availability_nux: Option<ModelAvailabilityNux>,
pub upgrade: Option<ModelInfoUpgrade>,
pub base_instructions: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -410,6 +418,7 @@ impl From<ModelInfo> for ModelPreset {
migration_markdown: Some(upgrade.migration_markdown.clone()),
}),
show_in_picker: info.visibility == ModelVisibility::List,
availability_nux: info.availability_nux,
supported_in_api: info.supported_in_api,
input_modalities: info.input_modalities,
}
@@ -495,6 +504,7 @@ mod tests {
visibility: ModelVisibility::List,
supported_in_api: true,
priority: 1,
availability_nux: None,
upgrade: None,
base_instructions: "base".to_string(),
model_messages: spec,
@@ -668,4 +678,57 @@ mod tests {
);
assert_eq!(personality_variables.get_personality_message(None), None);
}
#[test]
fn model_info_defaults_availability_nux_to_none_when_omitted() {
let model: ModelInfo = serde_json::from_value(serde_json::json!({
"slug": "test-model",
"display_name": "Test Model",
"description": null,
"supported_reasoning_levels": [],
"shell_type": "shell_command",
"visibility": "list",
"supported_in_api": true,
"priority": 1,
"upgrade": null,
"base_instructions": "base",
"model_messages": null,
"supports_reasoning_summaries": false,
"default_reasoning_summary": "auto",
"support_verbosity": false,
"default_verbosity": null,
"apply_patch_tool_type": null,
"truncation_policy": {
"mode": "bytes",
"limit": 10000
},
"supports_parallel_tool_calls": false,
"context_window": null,
"auto_compact_token_limit": null,
"effective_context_window_percent": 95,
"experimental_supported_tools": [],
"input_modalities": ["text", "image"],
"prefer_websockets": false
}))
.expect("deserialize model info");
assert_eq!(model.availability_nux, None);
}
#[test]
fn model_preset_preserves_availability_nux() {
let preset = ModelPreset::from(ModelInfo {
availability_nux: Some(ModelAvailabilityNux {
message: "Try Spark.".to_string(),
}),
..test_model(None)
});
assert_eq!(
preset.availability_nux,
Some(ModelAvailabilityNux {
message: "Try Spark.".to_string(),
})
);
}
}

View File

@@ -2743,6 +2743,24 @@ impl App {
));
}
}
AppEvent::PersistAvailabilityNuxDisplayed { model } => {
let count = self
.config
.notices
.availability_nux_display_counts
.entry(model.clone())
.or_default();
*count += 1;
if let Err(err) = ConfigEditsBuilder::new(&self.config.codex_home)
.record_availability_nux_display(model.as_str())
.apply()
.await
{
tracing::error!(error = %err, "failed to persist model-new startup tip display");
self.chat_widget
.add_error_message(format!("Failed to save model tip preference: {err}"));
}
}
AppEvent::OpenApprovalsPopup => {
self.chat_widget.open_approvals_popup();
}
@@ -3861,11 +3879,10 @@ mod tests {
rollout_path: Some(PathBuf::new()),
};
Arc::new(new_session_info(
app.chat_widget.config_ref(),
app.chat_widget.current_model(),
event,
is_first,
None,
Vec::new(),
)) as Arc<dyn HistoryCell>
};
@@ -3907,6 +3924,47 @@ mod tests {
rendered
}
fn render_session_info_snapshot(
requested_model: &str,
actual_model: &str,
is_first_event: bool,
startup_tips: &[&str],
) -> String {
let event = SessionConfiguredEvent {
session_id: ThreadId::new(),
forked_from_id: None,
thread_name: None,
model: actual_model.to_string(),
model_provider_id: "test-provider".to_string(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
cwd: PathBuf::from("/tmp/project"),
reasoning_effort: Some(ReasoningEffortConfig::High),
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
network_proxy: None,
rollout_path: Some(PathBuf::new()),
};
let cell = new_session_info(
requested_model,
event,
is_first_event,
startup_tips.iter().map(|tip| (*tip).to_string()).collect(),
);
cell.display_lines(80)
.into_iter()
.map(|line| {
line.spans
.into_iter()
.map(|span| span.content)
.collect::<String>()
})
.collect::<Vec<_>>()
.join("\n")
}
#[tokio::test]
async fn clear_ui_after_long_transcript_snapshots_fresh_header_only() {
let rendered = render_clear_ui_header_after_long_transcript_for_snapshot().await;
@@ -3919,6 +3977,49 @@ mod tests {
assert_snapshot!("clear_ui_after_long_transcript_fresh_header_only", rendered);
}
#[test]
fn first_session_header_with_model_nux_matches_snapshot() {
let rendered = render_session_info_snapshot(
"gpt-test",
"gpt-test",
true,
&["New Spark is now available to you."],
);
assert_snapshot!("first_session_header_with_model_nux", rendered);
}
#[test]
fn first_session_header_with_multiple_model_nux_matches_snapshot() {
let rendered = render_session_info_snapshot(
"gpt-test",
"gpt-test",
true,
&[
"New Spark is now available to you.",
"New Canvas is now available to you.",
],
);
assert_snapshot!("first_session_header_with_multiple_model_nux", rendered);
}
#[test]
fn later_session_header_with_model_nux_matches_snapshot() {
let rendered = render_session_info_snapshot(
"gpt-test",
"gpt-test",
false,
&["New Spark is now available to you."],
);
assert_snapshot!("later_session_header_with_model_nux", rendered);
}
#[test]
fn later_session_header_after_model_nux_exhaustion_matches_snapshot() {
let rendered =
render_session_info_snapshot("requested-model", "resolved-model", false, &[]);
assert_snapshot!("later_session_header_after_model_nux_exhaustion", rendered);
}
async fn make_test_app() -> App {
let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
let config = chat_widget.config_ref().clone();
@@ -4358,11 +4459,10 @@ mod tests {
rollout_path: Some(PathBuf::new()),
};
Arc::new(new_session_info(
app.chat_widget.config_ref(),
app.chat_widget.current_model(),
event,
is_first,
None,
Vec::new(),
)) as Arc<dyn HistoryCell>
};

View File

@@ -335,6 +335,11 @@ pub(crate) enum AppEvent {
to_model: String,
},
/// Persist that an availability NUX was displayed.
PersistAvailabilityNuxDisplayed {
model: String,
},
/// Skip the next world-writable scan (one-shot) after a user-confirmed continue.
#[cfg_attr(not(target_os = "windows"), allow(dead_code))]
SkipNextWorldWritableScan,

View File

@@ -250,6 +250,7 @@ use crate::status::RateLimitSnapshotDisplay;
use crate::status_indicator_widget::STATUS_DETAILS_DEFAULT_MAX_LINES;
use crate::status_indicator_widget::StatusDetailsCapitalization;
use crate::text_formatting::truncate_text;
use crate::tooltips::StartupTip;
use crate::tui::FrameRequester;
mod interrupts;
use self::interrupts::InterruptManager;
@@ -1102,6 +1103,7 @@ impl ChatWidget {
// --- Small event handlers ---
fn on_session_configured(&mut self, event: codex_protocol::protocol::SessionConfiguredEvent) {
let requested_model = self.current_model().to_string();
self.bottom_pane
.set_history_metadata(event.history_log_id, event.history_entry_count);
self.set_skills(None);
@@ -1136,6 +1138,19 @@ impl ChatWidget {
self.last_copyable_output = None;
let forked_from_id = event.forked_from_id;
let model_for_header = event.model.clone();
let startup_tips = if self.config.show_tooltips {
let models = self.models_manager.try_list_models().unwrap_or_default();
crate::tooltips::get_startup_tips(
&models,
&self.config.notices.availability_nux_display_counts,
self.auth_manager
.auth_cached()
.and_then(|auth| auth.account_plan_type()),
self.show_welcome_banner,
)
} else {
Default::default()
};
self.session_header.set_model(&model_for_header);
self.current_collaboration_mode = self.current_collaboration_mode.with_updates(
Some(model_for_header.clone()),
@@ -1144,16 +1159,52 @@ impl ChatWidget {
);
self.refresh_model_display();
self.sync_personality_command_enabled();
let displayed_startup_tips = if self.show_welcome_banner {
startup_tips
.first_session_tips
.iter()
.map(StartupTip::message)
.collect()
} else {
startup_tips
.selected_tip
.iter()
.map(StartupTip::message)
.collect()
};
let session_info_cell = history_cell::new_session_info(
&self.config,
&model_for_header,
&requested_model,
event,
self.show_welcome_banner,
self.auth_manager
.auth_cached()
.and_then(|auth| auth.account_plan_type()),
displayed_startup_tips,
);
self.apply_session_info_cell(session_info_cell);
let displayed_availability_nux_models: Vec<String> = if self.show_welcome_banner {
startup_tips
.first_session_tips
.iter()
.filter_map(StartupTip::availability_nux_model)
.map(str::to_string)
.collect()
} else {
startup_tips
.selected_tip
.iter()
.filter_map(StartupTip::availability_nux_model)
.map(str::to_string)
.collect()
};
for model in displayed_availability_nux_models {
let count = self
.config
.notices
.availability_nux_display_counts
.entry(model.clone())
.or_default();
*count += 1;
self.app_event_tx
.send(AppEvent::PersistAvailabilityNuxDisplayed { model });
}
if let Some(messages) = initial_messages {
self.replay_initial_messages(messages);

View File

@@ -31,6 +31,7 @@ use codex_core::models_manager::collaboration_mode_presets::CollaborationModesCo
use codex_core::models_manager::manager::ModelsManager;
use codex_core::skills::model::SkillMetadata;
use codex_core::terminal::TerminalName;
use codex_core::test_support::construct_model_info_offline;
use codex_otel::OtelManager;
use codex_otel::RuntimeMetricsSummary;
use codex_protocol::ThreadId;
@@ -44,7 +45,9 @@ use codex_protocol::items::AgentMessageItem;
use codex_protocol::items::PlanItem;
use codex_protocol::items::TurnItem;
use codex_protocol::models::MessagePhase;
use codex_protocol::openai_models::ModelAvailabilityNux;
use codex_protocol::openai_models::ModelPreset;
use codex_protocol::openai_models::ModelsResponse;
use codex_protocol::openai_models::ReasoningEffortPreset;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::parse_command::ParsedCommand;
@@ -80,6 +83,7 @@ use codex_protocol::protocol::PatchApplyStatus as CorePatchApplyStatus;
use codex_protocol::protocol::RateLimitWindow;
use codex_protocol::protocol::ReviewRequest;
use codex_protocol::protocol::ReviewTarget;
use codex_protocol::protocol::SessionConfiguredEvent;
use codex_protocol::protocol::SessionSource;
use codex_protocol::protocol::SkillScope;
use codex_protocol::protocol::StreamErrorEvent;
@@ -1838,6 +1842,104 @@ fn lines_to_single_string(lines: &[ratatui::text::Line<'static>]) -> String {
s
}
#[tokio::test]
async fn session_configured_first_session_shows_model_nux_and_increments_display_count() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1")).await;
let mut model_info = construct_model_info_offline("gpt-5.1", &chat.config);
model_info.display_name = "GPT Test".to_string();
model_info.description = Some("Fast, high-reliability coding model.".to_string());
model_info.availability_nux = Some(ModelAvailabilityNux {
message:
"*New* You're using **GPT Test**. Fast, high-reliability coding model. Use /model to compare or switch anytime."
.to_string(),
});
chat.models_manager = Arc::new(ModelsManager::new(
chat.config.codex_home.clone(),
chat.auth_manager.clone(),
Some(ModelsResponse {
models: vec![model_info],
}),
CollaborationModesConfig::default(),
));
chat.handle_codex_event(Event {
id: "configured".into(),
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: ThreadId::new(),
forked_from_id: None,
thread_name: None,
model: "gpt-5.1".to_string(),
model_provider_id: "test-provider".to_string(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
cwd: PathBuf::from("/home/user/project"),
reasoning_effort: None,
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
network_proxy: None,
rollout_path: None,
}),
});
let cells = drain_insert_history(&mut rx);
let rendered = lines_to_single_string(cells.first().expect("session info cell"));
assert!(rendered.contains("To get started, describe a task"));
assert!(rendered.contains("You're using GPT Test."));
assert!(rendered.contains("compare or switch anytime."));
assert_eq!(
Some(&1),
chat.config
.notices
.availability_nux_display_counts
.get("gpt-5.1")
);
}
#[tokio::test]
async fn session_configured_ineligible_model_does_not_increment_display_count() {
let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(Some("gpt-5.1")).await;
let mut model_info = construct_model_info_offline("gpt-5.1", &chat.config);
model_info.availability_nux = None;
chat.models_manager = Arc::new(ModelsManager::new(
chat.config.codex_home.clone(),
chat.auth_manager.clone(),
Some(ModelsResponse {
models: vec![model_info],
}),
CollaborationModesConfig::default(),
));
chat.handle_codex_event(Event {
id: "configured".into(),
msg: EventMsg::SessionConfigured(SessionConfiguredEvent {
session_id: ThreadId::new(),
forked_from_id: None,
thread_name: None,
model: "gpt-5.1".to_string(),
model_provider_id: "test-provider".to_string(),
approval_policy: AskForApproval::Never,
sandbox_policy: SandboxPolicy::new_read_only_policy(),
cwd: PathBuf::from("/home/user/project"),
reasoning_effort: None,
history_log_id: 0,
history_entry_count: 0,
initial_messages: None,
network_proxy: None,
rollout_path: None,
}),
});
let _ = drain_insert_history(&mut rx);
assert_eq!(
None,
chat.config
.notices
.availability_nux_display_counts
.get("gpt-5.1")
);
}
fn make_token_info(total_tokens: i64, context_window: i64) -> TokenUsageInfo {
fn usage(total_tokens: i64) -> TokenUsage {
TokenUsage {
@@ -6077,6 +6179,7 @@ async fn model_picker_hides_show_in_picker_false_models_from_cache() {
is_default: false,
upgrade: None,
show_in_picker,
availability_nux: None,
supported_in_api: true,
input_modalities: default_input_modalities(),
};
@@ -6345,6 +6448,7 @@ async fn single_reasoning_option_skips_selection() {
is_default: false,
upgrade: None,
show_in_picker: true,
availability_nux: None,
supported_in_api: true,
input_modalities: default_input_modalities(),
};

View File

@@ -29,7 +29,6 @@ use crate::style::proposed_plan_style;
use crate::style::user_message_style;
use crate::text_formatting::format_and_truncate_tool_result;
use crate::text_formatting::truncate_text;
use crate::tooltips;
use crate::ui_consts::LIVE_PREFIX_COLS;
use crate::update_action::UpdateAction;
use crate::version::CODEX_CLI_VERSION;
@@ -41,7 +40,6 @@ use codex_core::config::Config;
use codex_core::config::types::McpServerTransportConfig;
use codex_core::web_search::web_search_detail;
use codex_otel::RuntimeMetricsSummary;
use codex_protocol::account::PlanType;
use codex_protocol::mcp::Resource;
use codex_protocol::mcp::ResourceTemplate;
use codex_protocol::models::WebSearchAction;
@@ -1037,24 +1035,20 @@ impl HistoryCell for SessionInfoCell {
}
pub(crate) fn new_session_info(
config: &Config,
requested_model: &str,
event: SessionConfiguredEvent,
is_first_event: bool,
auth_plan: Option<PlanType>,
startup_tips: Vec<String>,
) -> SessionInfoCell {
let SessionConfiguredEvent {
model,
cwd,
reasoning_effort,
..
} = event;
// Header box rendered as history (so it appears at the very top)
let header = SessionHeaderHistoryCell::new(
model.clone(),
reasoning_effort,
config.cwd.clone(),
CODEX_CLI_VERSION,
);
let header =
SessionHeaderHistoryCell::new(model.clone(), reasoning_effort, cwd, CODEX_CLI_VERSION);
let mut parts: Vec<Box<dyn HistoryCell>> = vec![Box::new(header)];
if is_first_event {
@@ -1092,11 +1086,12 @@ pub(crate) fn new_session_info(
];
parts.push(Box::new(PlainHistoryCell { lines: help_lines }));
for startup_tip in startup_tips {
parts.push(Box::new(TooltipHistoryCell::new(startup_tip)));
}
} else {
if config.show_tooltips
&& let Some(tooltips) = tooltips::get_tooltip(auth_plan).map(TooltipHistoryCell::new)
{
parts.push(Box::new(tooltips));
for startup_tip in startup_tips {
parts.push(Box::new(TooltipHistoryCell::new(startup_tip)));
}
if requested_model != model {
let lines = vec![

View File

@@ -0,0 +1,20 @@
---
source: tui/src/app.rs
expression: rendered
---
╭─────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: gpt-test high /model to change │
│ directory: /tmp/project │
╰─────────────────────────────────────────────╯
To get started, describe a task or try one of these commands:
/init - create an AGENTS.md file with instructions for Codex
/status - show current session configuration
/permissions - choose what Codex is allowed to do
/model - choose what model and reasoning effort to use
/review - review any changes and find issues
Tip: New Spark is now available to you.

View File

@@ -0,0 +1,22 @@
---
source: tui/src/app.rs
expression: rendered
---
╭─────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: gpt-test high /model to change │
│ directory: /tmp/project │
╰─────────────────────────────────────────────╯
To get started, describe a task or try one of these commands:
/init - create an AGENTS.md file with instructions for Codex
/status - show current session configuration
/permissions - choose what Codex is allowed to do
/model - choose what model and reasoning effort to use
/review - review any changes and find issues
Tip: New Spark is now available to you.
Tip: New Canvas is now available to you.

View File

@@ -0,0 +1,14 @@
---
source: tui/src/app.rs
expression: rendered
---
╭───────────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: resolved-model high /model to change │
│ directory: /tmp/project │
╰───────────────────────────────────────────────────╯
model changed:
requested: requested-model
used: resolved-model

View File

@@ -0,0 +1,12 @@
---
source: tui/src/app.rs
expression: rendered
---
╭─────────────────────────────────────────────╮
│ >_ OpenAI Codex (v0.0.0) │
│ │
│ model: gpt-test high /model to change │
│ directory: /tmp/project │
╰─────────────────────────────────────────────╯
Tip: New Spark is now available to you.

View File

@@ -1,7 +1,10 @@
use codex_core::features::FEATURES;
use codex_protocol::account::PlanType;
use codex_protocol::openai_models::ModelAvailabilityNux;
use codex_protocol::openai_models::ModelPreset;
use lazy_static::lazy_static;
use rand::Rng;
use std::collections::BTreeMap;
const ANNOUNCEMENT_TIP_URL: &str =
"https://raw.githubusercontent.com/openai/codex/main/announcement_tip.toml";
@@ -46,44 +49,171 @@ fn experimental_tooltips() -> Vec<&'static str> {
.collect()
}
/// Pick a random tooltip to show to the user when starting Codex.
pub(crate) fn get_tooltip(plan: Option<PlanType>) -> Option<String> {
let mut rng = rand::rng();
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum StartupTip {
Generic(String),
AvailabilityNux { model: String, message: String },
}
if let Some(announcement) = announcement::fetch_announcement_tip() {
impl StartupTip {
pub(crate) fn message(&self) -> String {
match self {
Self::Generic(message) | Self::AvailabilityNux { message, .. } => message.clone(),
}
}
pub(crate) fn availability_nux_model(&self) -> Option<&str> {
match self {
Self::Generic(_) => None,
Self::AvailabilityNux { model, .. } => Some(model.as_str()),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub(crate) struct StartupTips {
pub(crate) first_session_tips: Vec<StartupTip>,
pub(crate) selected_tip: Option<StartupTip>,
}
pub(crate) fn get_startup_tips(
models: &[ModelPreset],
availability_nux_display_counts: &BTreeMap<String, u32>,
plan: Option<PlanType>,
is_first_session: bool,
) -> StartupTips {
let mut rng = rand::rng();
get_startup_tips_with_rng(
models,
availability_nux_display_counts,
plan,
is_first_session,
announcement::fetch_announcement_tip(),
&mut rng,
)
}
fn get_startup_tips_with_rng<R: Rng + ?Sized>(
models: &[ModelPreset],
availability_nux_display_counts: &BTreeMap<String, u32>,
plan: Option<PlanType>,
is_first_session: bool,
announcement_tip: Option<String>,
rng: &mut R,
) -> StartupTips {
let availability_nux_tips = availability_nux_tips(models, availability_nux_display_counts);
if is_first_session {
return StartupTips {
first_session_tips: availability_nux_tips,
selected_tip: None,
};
}
if availability_nux_tips.is_empty() {
return StartupTips {
first_session_tips: Vec::new(),
selected_tip: get_generic_tooltip_with_rng(plan, announcement_tip, rng)
.map(StartupTip::Generic),
};
}
let mut weighted_candidates = Vec::new();
for availability_nux_tip in &availability_nux_tips {
weighted_candidates.push(availability_nux_tip.clone());
weighted_candidates.push(availability_nux_tip.clone());
weighted_candidates.push(availability_nux_tip.clone());
weighted_candidates.push(availability_nux_tip.clone());
}
if let Some(announcement_tip) = announcement_tip {
weighted_candidates.push(StartupTip::Generic(announcement_tip));
}
if let Some(plan_tip) = plan_tooltip(plan) {
weighted_candidates.push(StartupTip::Generic(plan_tip.to_string()));
}
if let Some(random_tip) = pick_tooltip(rng) {
weighted_candidates.push(StartupTip::Generic(random_tip.to_string()));
}
StartupTips {
first_session_tips: Vec::new(),
selected_tip: weighted_candidates
.get(rng.random_range(0..weighted_candidates.len()))
.cloned(),
}
}
fn get_generic_tooltip_with_rng<R: Rng + ?Sized>(
plan: Option<PlanType>,
announcement_tip: Option<String>,
rng: &mut R,
) -> Option<String> {
if let Some(announcement) = announcement_tip {
return Some(announcement);
}
// Leave small chance for a random tooltip to be shown.
if rng.random_ratio(8, 10) {
match plan {
Some(PlanType::Plus)
| Some(PlanType::Business)
| Some(PlanType::Team)
| Some(PlanType::Enterprise)
| Some(PlanType::Pro) => {
let tooltip = if IS_MACOS {
PAID_TOOLTIP
} else {
PAID_TOOLTIP_NON_MAC
};
return Some(tooltip.to_string());
}
Some(PlanType::Go) | Some(PlanType::Free) => {
return Some(FREE_GO_TOOLTIP.to_string());
}
_ => {
let tooltip = if IS_MACOS {
OTHER_TOOLTIP
} else {
OTHER_TOOLTIP_NON_MAC
};
return Some(tooltip.to_string());
}
}
if rng.random_ratio(8, 10)
&& let Some(tooltip) = plan_tooltip(plan)
{
return Some(tooltip.to_string());
}
pick_tooltip(&mut rng).map(str::to_string)
pick_tooltip(rng).map(str::to_string)
}
fn plan_tooltip(plan: Option<PlanType>) -> Option<&'static str> {
match plan {
Some(PlanType::Plus)
| Some(PlanType::Business)
| Some(PlanType::Team)
| Some(PlanType::Enterprise)
| Some(PlanType::Pro)
| Some(PlanType::Edu) => Some(if IS_MACOS {
PAID_TOOLTIP
} else {
PAID_TOOLTIP_NON_MAC
}),
Some(PlanType::Go) | Some(PlanType::Free) => Some(FREE_GO_TOOLTIP),
Some(PlanType::Unknown) | None => Some(if IS_MACOS {
OTHER_TOOLTIP
} else {
OTHER_TOOLTIP_NON_MAC
}),
}
}
fn availability_nux_tips(
models: &[ModelPreset],
availability_nux_display_counts: &BTreeMap<String, u32>,
) -> Vec<StartupTip> {
models
.iter()
.filter_map(|preset| {
preset
.availability_nux
.as_ref()
.map(|availability_nux| (preset.model.as_str(), availability_nux))
})
.filter(|(model, _)| {
availability_nux_display_counts
.get(*model)
.copied()
.unwrap_or(0)
< 4
})
.map(|(model, availability_nux)| startup_tip_from_availability_nux(model, availability_nux))
.collect()
}
fn startup_tip_from_availability_nux(
model: &str,
availability_nux: &ModelAvailabilityNux,
) -> StartupTip {
StartupTip::AvailabilityNux {
model: model.to_string(),
message: availability_nux.message.clone(),
}
}
fn pick_tooltip<R: Rng + ?Sized>(rng: &mut R) -> Option<&'static str> {
@@ -247,6 +377,31 @@ mod tests {
use rand::SeedableRng;
use rand::rngs::StdRng;
fn model_preset(
model: &str,
display_name: &str,
availability_nux: Option<&str>,
description: &str,
) -> ModelPreset {
ModelPreset {
id: model.to_string(),
model: model.to_string(),
display_name: display_name.to_string(),
description: description.to_string(),
default_reasoning_effort: codex_protocol::openai_models::ReasoningEffort::Medium,
supported_reasoning_efforts: vec![],
supports_personality: false,
is_default: false,
upgrade: None,
show_in_picker: true,
availability_nux: availability_nux.map(|message| ModelAvailabilityNux {
message: message.to_string(),
}),
supported_in_api: true,
input_modalities: codex_protocol::openai_models::default_input_modalities(),
}
}
#[test]
fn random_tooltip_returns_some_tip_when_available() {
let mut rng = StdRng::seed_from_u64(42);
@@ -363,4 +518,207 @@ content = "This is a test announcement"
parse_announcement_tip_toml(toml)
);
}
#[test]
fn first_session_eligible_model_returns_all_availability_nux_tips() {
let preset = model_preset(
"gpt-test",
"GPT Test",
Some("*New* Spark is now available to you."),
"Fast, high-reliability coding model.",
);
let mut rng = StdRng::seed_from_u64(1);
let tips = get_startup_tips_with_rng(
&[preset],
&BTreeMap::new(),
Some(PlanType::Plus),
true,
Some("announcement".to_string()),
&mut rng,
);
assert_eq!(
StartupTips {
first_session_tips: vec![StartupTip::AvailabilityNux {
model: "gpt-test".to_string(),
message: "*New* Spark is now available to you.".to_string(),
}],
selected_tip: None,
},
tips
);
}
#[test]
fn first_session_ineligible_model_skips_tip() {
let preset = model_preset("gpt-test", "GPT Test", None, "");
let mut rng = StdRng::seed_from_u64(1);
let tips = get_startup_tips_with_rng(
&[preset],
&BTreeMap::new(),
Some(PlanType::Plus),
true,
Some("announcement".to_string()),
&mut rng,
);
assert_eq!(StartupTips::default(), tips);
}
#[test]
fn first_session_returns_multiple_availability_nuxes() {
let mut rng = StdRng::seed_from_u64(1);
let models = vec![
model_preset(
"spark",
"Spark",
Some("*New* Spark is now available to you."),
"",
),
model_preset(
"canvas",
"Canvas",
Some("*New* Canvas is now available to you."),
"",
),
];
let tips = get_startup_tips_with_rng(
&models,
&BTreeMap::new(),
Some(PlanType::Plus),
true,
Some("announcement".to_string()),
&mut rng,
);
assert_eq!(
StartupTips {
first_session_tips: vec![
StartupTip::AvailabilityNux {
model: "spark".to_string(),
message: "*New* Spark is now available to you.".to_string(),
},
StartupTip::AvailabilityNux {
model: "canvas".to_string(),
message: "*New* Canvas is now available to you.".to_string(),
},
],
selected_tip: None,
},
tips
);
}
#[test]
fn later_session_can_select_availability_nux_from_weighted_pool() {
let preset = model_preset(
"gpt-test",
"GPT Test",
Some("*New* Spark is now available to you."),
"",
);
let mut rng = StdRng::seed_from_u64(5);
let tips = get_startup_tips_with_rng(
&[preset],
&BTreeMap::new(),
Some(PlanType::Plus),
false,
Some("announcement".to_string()),
&mut rng,
);
assert_eq!(
StartupTips {
first_session_tips: Vec::new(),
selected_tip: Some(StartupTip::AvailabilityNux {
model: "gpt-test".to_string(),
message: "*New* Spark is now available to you.".to_string(),
}),
},
tips
);
}
#[test]
fn later_session_count_limit_disables_availability_nux() {
let preset = model_preset(
"gpt-test",
"GPT Test",
Some("*New* Spark is now available to you."),
"",
);
let counts = BTreeMap::from([("gpt-test".to_string(), 4)]);
let mut rng = StdRng::seed_from_u64(5);
let tips = get_startup_tips_with_rng(
&[preset],
&counts,
Some(PlanType::Plus),
false,
Some("announcement".to_string()),
&mut rng,
);
assert_eq!(
StartupTips {
first_session_tips: Vec::new(),
selected_tip: Some(StartupTip::Generic("announcement".to_string())),
},
tips
);
}
#[test]
fn later_session_eligible_model_includes_announcement_and_generic_candidates() {
let preset = model_preset(
"gpt-test",
"GPT Test",
Some("*New* Spark is now available to you."),
"",
);
let mut saw_announcement = false;
let mut saw_plan_tip = false;
let mut saw_random_tip = false;
let mut saw_availability_nux_tip = false;
for seed in 0..64 {
let mut rng = StdRng::seed_from_u64(seed);
let tip = get_startup_tips_with_rng(
std::slice::from_ref(&preset),
&BTreeMap::new(),
Some(PlanType::Plus),
false,
Some("announcement".to_string()),
&mut rng,
)
.selected_tip
.expect("tip");
match tip {
StartupTip::Generic(message) if message == "announcement" => {
saw_announcement = true;
}
StartupTip::Generic(message)
if message == plan_tooltip(Some(PlanType::Plus)).unwrap() =>
{
saw_plan_tip = true;
}
StartupTip::Generic(_) => {
saw_random_tip = true;
}
StartupTip::AvailabilityNux { .. } => {
saw_availability_nux_tip = true;
}
}
}
assert!(saw_announcement);
assert!(saw_plan_tip);
assert!(saw_random_tip);
assert!(saw_availability_nux_tip);
}
}