Compare commits

...

26 Commits

Author SHA1 Message Date
jimmyfraiture
7fc9b33304 Add one test 2025-10-01 17:23:27 +01:00
jif-oai
f860879c92 Merge branch 'main' into jif/auto-compaction-flag 2025-10-01 12:06:21 +01:00
jimmyfraiture
026ad6e3f9 Process comment 2025-10-01 11:01:25 +01:00
jimmyfraiture
1e69213e3f Move to 15 2025-10-01 10:50:55 +01:00
jimmyfraiture
154e93c290 Try to replace macos version 2025-10-01 10:49:22 +01:00
jimmyfraiture
b41adf1cc5 V1 2025-10-01 10:25:59 +01:00
jimmyfraiture
16abbe34f8 FMT 2025-10-01 10:16:58 +01:00
jimmyfraiture
c1fa666186 Fix merge 2025-10-01 10:13:30 +01:00
jimmyfraiture
4c03946dcc Merge remote-tracking branch 'origin/main' into jif/sandbox-1
# Conflicts:
#	codex-rs/app-server/tests/suite/codex_message_processor_flow.rs
2025-10-01 09:47:28 +01:00
jimmyfraiture
c5cf9535e3 Fix test 2025-09-30 18:35:00 +01:00
jimmyfraiture
60e7333575 Add integration tests 2025-09-30 17:50:30 +01:00
jimmyfraiture
3def127178 Clippy 2025-09-30 13:31:48 +01:00
jimmyfraiture
5c00e1596a Restore otel 2025-09-30 13:28:25 +01:00
jimmyfraiture
9c194dc0f9 Fix merge 2025-09-30 12:56:30 +01:00
jimmyfraiture
4533dceafa Merge remote-tracking branch 'origin/main' into jif/sandbox-1
# Conflicts:
#	codex-rs/core/src/codex.rs
2025-09-30 12:45:23 +01:00
jimmyfraiture
43c0abb31e RV 6 2025-09-30 12:42:36 +01:00
jimmyfraiture
8c09db17c3 RV 5 2025-09-30 12:04:44 +01:00
jimmyfraiture
1d87628d41 RV 4 2025-09-30 11:55:43 +01:00
jimmyfraiture
4656160e31 RV 3 2025-09-30 11:40:44 +01:00
jimmyfraiture
2dd226891a RV 2 2025-09-30 11:36:51 +01:00
jimmyfraiture
ed45f85209 RV 1 2025-09-30 11:27:23 +01:00
jimmyfraiture
5b74f10a7b Sandboxing iteration 2 2025-09-29 19:34:12 +01:00
jif-oai
7b6d8b60c9 Merge branch 'main' into jif/sandbox-1 2025-09-29 09:35:29 +01:00
jimmyfraiture
caab5a19ee Move some stuff around 2025-09-26 14:46:07 +02:00
jimmyfraiture
a29380cdff Isolate apply patch adapter 2025-09-26 14:02:38 +02:00
jimmyfraiture
805de19381 V1 2025-09-26 13:42:58 +02:00
8 changed files with 125 additions and 8 deletions

View File

@@ -99,10 +99,10 @@ jobs:
fail-fast: false
matrix:
include:
- - runner: macos-14
+ - runner: macos-15
target: aarch64-apple-darwin
profile: dev
- - runner: macos-14
+ - runner: macos-15
target: x86_64-apple-darwin
profile: dev
- runner: ubuntu-24.04
@@ -128,7 +128,7 @@ jobs:
# there could be release-only build errors we want to catch.
# Hopefully this also pre-populates the build cache to speed up
# releases.
- - runner: macos-14
+ - runner: macos-15
target: aarch64-apple-darwin
profile: release
- runner: ubuntu-24.04

View File

@@ -58,9 +58,9 @@ jobs:
fail-fast: false
matrix:
include:
- - runner: macos-14
+ - runner: macos-15
target: aarch64-apple-darwin
- - runner: macos-14
+ - runner: macos-15
target: x86_64-apple-darwin
- runner: ubuntu-24.04
target: x86_64-unknown-linux-musl

View File

@@ -432,6 +432,9 @@ fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) {
if resume_cli.web_search {
interactive.web_search = true;
}
if resume_cli.disable_auto_compaction {
interactive.disable_auto_compaction = true;
}
if !resume_cli.images.is_empty() {
interactive.images = resume_cli.images;
}
@@ -567,6 +570,7 @@ mod tests {
"--oss",
"--full-auto",
"--search",
"--disable-auto-compaction",
"--sandbox",
"workspace-write",
"--ask-for-approval",
@@ -609,6 +613,18 @@ mod tests {
.iter()
.any(|p| p == std::path::Path::new("/tmp/b.png"));
assert!(has_a && has_b);
assert!(interactive.disable_auto_compaction);
assert!(!interactive.resume_picker);
assert!(!interactive.resume_last);
assert_eq!(interactive.resume_session_id.as_deref(), Some("sid"));
}
#[test]
fn resume_disable_auto_compaction_flag_merges() {
let interactive =
finalize_from_args(["codex", "resume", "sid", "--disable-auto-compaction"].as_ref());
assert!(interactive.disable_auto_compaction);
assert!(!interactive.resume_picker);
assert!(!interactive.resume_last);
assert_eq!(interactive.resume_session_id.as_deref(), Some("sid"));

View File

@@ -113,9 +113,12 @@ impl ModelClient {
}
pub fn get_auto_compact_token_limit(&self) -> Option<i64> {
-        self.config.model_auto_compact_token_limit.or_else(|| {
-            get_model_info(&self.config.model_family).and_then(|info| info.auto_compact_token_limit)
-        })
+        match self.config.model_auto_compact_token_limit {
+            Some(limit) if limit <= crate::config::AUTO_COMPACT_DISABLED => None,
+            Some(limit) => Some(limit),
+            None => get_model_info(&self.config.model_family)
+                .and_then(|info| info.auto_compact_token_limit),
+        }
}
/// Dispatches to either the Responses or Chat implementation depending on
@@ -912,7 +915,11 @@ fn try_parse_retry_after(err: &Error) -> Option<Duration> {
#[cfg(test)]
mod tests {
use super::*;
use crate::built_in_model_providers;
use crate::config::ConfigOverrides;
use crate::config::ConfigToml;
use serde_json::json;
use tempfile::TempDir;
use tokio::sync::mpsc;
use tokio_test::io::Builder as IoBuilder;
use tokio_util::io::ReaderStream;
@@ -998,6 +1005,31 @@ mod tests {
)
}
#[test]
fn auto_compaction_disabled_when_limit_sentinel() {
let codex_home = TempDir::new().expect("tempdir");
let mut config = Config::load_from_base_config_with_overrides(
ConfigToml::default(),
ConfigOverrides::default(),
codex_home.path().to_path_buf(),
)
.expect("load default config");
config.model_auto_compact_token_limit = Some(crate::config::AUTO_COMPACT_DISABLED);
let provider = built_in_model_providers()["openai"].clone();
let client = ModelClient::new(
Arc::new(config),
None,
otel_event_manager(),
provider,
None,
ReasoningSummaryConfig::default(),
ConversationId::new(),
);
assert!(client.get_auto_compact_token_limit().is_none());
}
// ────────────────────────────
// Tests from `implement-test-for-responses-api-sse-parser`
// ────────────────────────────

View File

@@ -45,6 +45,8 @@ use toml_edit::Table as TomlTable;
const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
/// Sentinel value signalling auto-compaction should be fully disabled.
pub const AUTO_COMPACT_DISABLED: i64 = -1;
/// Maximum number of bytes of the documentation that will be embedded. Larger
/// files are *silently truncated* to this size so we do not take up too much of

View File

@@ -3,6 +3,7 @@ use codex_core::ConversationManager;
use codex_core::ModelProviderInfo;
use codex_core::NewConversation;
use codex_core::built_in_model_providers;
use codex_core::config::AUTO_COMPACT_DISABLED;
use codex_core::protocol::ErrorEvent;
use codex_core::protocol::EventMsg;
use codex_core::protocol::InputItem;
@@ -828,3 +829,61 @@ async fn auto_compact_allows_multiple_attempts_when_interleaved_with_other_turn_
"second auto compact request should include the summarization prompt"
);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn auto_compact_respects_disabled_flag() {
skip_if_no_network!();
let server = start_mock_server().await;
let sse_body = sse(vec![
ev_assistant_message("m1", FIRST_REPLY),
ev_completed_with_tokens("r1", 500_000),
]);
Mock::given(method("POST"))
.and(path("/v1/responses"))
.respond_with(sse_response(sse_body))
.expect(1)
.mount(&server)
.await;
let model_provider = ModelProviderInfo {
base_url: Some(format!("{}/v1", server.uri())),
..built_in_model_providers()["openai"].clone()
};
let home = TempDir::new().unwrap();
let mut config = load_default_config_for_test(&home);
config.model_provider = model_provider;
config.model_auto_compact_token_limit = Some(AUTO_COMPACT_DISABLED);
let conversation_manager = ConversationManager::with_auth(CodexAuth::from_api_key("dummy"));
let codex = conversation_manager
.new_conversation(config)
.await
.unwrap()
.conversation;
codex
.submit(Op::UserInput {
items: vec![InputItem::Text {
text: FIRST_AUTO_MSG.into(),
}],
})
.await
.unwrap();
wait_for_event(&codex, |ev| matches!(ev, EventMsg::TaskComplete(_))).await;
let requests = server.received_requests().await.unwrap();
assert_eq!(
requests.len(),
1,
"disable flag should avoid additional summarization requests"
);
let body = std::str::from_utf8(&requests[0].body).unwrap();
assert!(
!body.contains("You have exceeded the maximum number of tokens"),
"auto compaction should remain disabled even when usage is high"
);
}

View File

@@ -72,6 +72,10 @@ pub struct Cli {
#[arg(long = "search", default_value_t = false)]
pub web_search: bool,
/// Disable automatic conversation history compaction triggered near the model token limit.
#[arg(long = "disable-auto-compaction", default_value_t = false)]
pub disable_auto_compaction: bool,
#[clap(skip)]
pub config_overrides: CliConfigOverrides,
}

View File

@@ -170,6 +170,10 @@ pub async fn run_main(
}
};
if cli.disable_auto_compaction {
config.model_auto_compact_token_limit = Some(codex_core::config::AUTO_COMPACT_DISABLED);
}
// we load config.toml here to determine project state.
#[allow(clippy::print_stderr)]
let config_toml = {