Fix context budgeting and compaction

- Budget now counts exact message tokens matching what assemble_api_messages
  sends, not raw string content. Eliminates undercounting from formatting
  overhead (journal headers, personality separators, working stack).

- Load journal before trimming so trim accounts for journal cost.

- Compact before every turn, not just after turn completion. Prevents
  agent_cycle surfaced memories from pushing context over budget.

- Move agent_cycle orchestration from Agent::turn to Mind::start_turn —
  surfaced memories and reflections now precede the user message.

- Move AgentCycleState from Agent to Mind — it's orchestration, not
  per-agent state. memory_scoring_in_flight and memory_scores stay on
  Agent where they belong.

- Tag DMN entries as ConversationEntry::Dmn — compaction evicts them
  first since they're ephemeral. Compaction also prefers evicting
  memories over conversation when memories exceed 50% of entry tokens.

- Kill /retry slash command.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-06 21:48:12 -04:00
parent c22b8c3a6f
commit d5e6f55da9
5 changed files with 194 additions and 170 deletions

View file

// NOTE(review): this span is a unified-diff rendering of trim_entries — removed
// and added lines appear interleaved with no +/- markers, so both the old and
// the new version of several statements are visible side by side. The comments
// below flag which is which; do not treat this span as compilable code.
@ -59,8 +59,6 @@ pub fn trim_entries(
entries: &[ConversationEntry],
tokenizer: &CoreBPE,
) -> Vec<ConversationEntry> {
// OLD: raw-string token counting — removed because it undercounts the real
// per-message formatting overhead (per the commit message above).
let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
// --- Phase 1: dedup memory entries by key (keep last) ---
// seen_keys tracks the last index per key; earlier duplicates go into
// drop_indices. The loop body itself is elided by the diff.
let mut seen_keys: std::collections::HashMap<&str, usize> = std::collections::HashMap::new();
let mut drop_indices: std::collections::HashSet<usize> = std::collections::HashSet::new();
@ -79,31 +77,77 @@ pub fn trim_entries(
.collect();
// --- Phase 2: trim to fit context budget ---
// Everything in the context window is a message. Count them all,
// trim entries until the total fits.
let max_tokens = context_budget_tokens();
// OLD budget math: subtract identity + journal *string* costs from the cap.
let identity_cost = count(&context.system_prompt)
+ context.personality.iter().map(|(_, c)| count(c)).sum::<usize>();
let journal_cost: usize = context.journal.iter().map(|e| count(&e.content)).sum();
let available = max_tokens
.saturating_sub(identity_cost)
.saturating_sub(journal_cost);
// NEW budget math: price the exact messages that will be sent (system
// prompt, context message, rendered journal) via msg_token_count.
let count_msg = |m: &Message| msg_token_count(tokenizer, m);
let fixed_cost = count_msg(&Message::system(&context.system_prompt))
+ count_msg(&Message::user(context.render_context_message()))
+ count_msg(&Message::user(render_journal(&context.journal)));
let msg_costs: Vec<usize> = deduped.iter()
// OLD map body (e.message() + raw count) directly followed by its NEW
// replacement (count_msg over e.api_message()).
.map(|e| msg_token_count(tokenizer, e.message())).collect();
let total: usize = msg_costs.iter().sum();
.map(|e| count_msg(e.api_message())).collect();
let entry_total: usize = msg_costs.iter().sum();
let total: usize = fixed_cost + entry_total;
// OLD trimming state: a forward skip cursor into the entry list.
let mut skip = 0;
let mem_tokens: usize = deduped.iter().zip(&msg_costs)
.filter(|(e, _)| e.is_memory())
.map(|(_, &c)| c).sum();
let conv_tokens: usize = entry_total - mem_tokens;
dbglog!("[trim] max_tokens={} fixed={} mem={} conv={} total={} entries={}",
max_tokens, fixed_cost, mem_tokens, conv_tokens, total, deduped.len());
// Phase 2a: evict all DMN entries first — they're ephemeral
let mut drop = vec![false; deduped.len()];
let mut trimmed = total;
// OLD trim loop: advance `skip` until the remainder fits `available`.
while trimmed > available && skip < deduped.len() {
trimmed -= msg_costs[skip];
skip += 1;
let mut cur_mem = mem_tokens;
for i in 0..deduped.len() {
if deduped[i].is_dmn() {
drop[i] = true;
trimmed -= msg_costs[i];
}
}
// Walk forward to user message boundary
// OLD boundary walk (superseded by the drop[]-aware pass at the bottom).
while skip < deduped.len() && deduped[skip].message().role != Role::User {
skip += 1;
// Phase 2b: if memories > 50% of entries, evict oldest memories
// (cur_mem > conv_tokens is equivalent to memories exceeding half of
// entry_total, since conv_tokens = entry_total - mem_tokens).
if cur_mem > conv_tokens && trimmed > max_tokens {
for i in 0..deduped.len() {
if drop[i] { continue; }
if !deduped[i].is_memory() { continue; }
if cur_mem <= conv_tokens { break; }
if trimmed <= max_tokens { break; }
drop[i] = true;
trimmed -= msg_costs[i];
cur_mem -= msg_costs[i];
}
}
// OLD return value: everything after the skip cursor.
deduped[skip..].to_vec()
// Phase 2b: drop oldest entries until under budget
// NOTE(review): second "Phase 2b" label (the memory pass above is also 2b,
// DMN eviction is 2a) — likely a diff-interleave artifact or a numbering
// slip; confirm against the final source.
for i in 0..deduped.len() {
if trimmed <= max_tokens { break; }
if drop[i] { continue; }
drop[i] = true;
trimmed -= msg_costs[i];
}
// Walk forward to include complete conversation boundaries
let mut result: Vec<ConversationEntry> = Vec::new();
let mut skipping = true;
for (i, entry) in deduped.into_iter().enumerate() {
if skipping {
if drop[i] { continue; }
// Snap to user message boundary
// Surviving leading entries that aren't User messages are still
// skipped, so the kept window always opens on a user turn.
if entry.message().role != Role::User { continue; }
skipping = false;
}
result.push(entry);
}
dbglog!("[trim] result={} trimmed_total={}", result.len(), trimmed);
result
}
/// Count the token footprint of a message using BPE tokenization.
@ -153,6 +197,8 @@ pub fn is_stream_error(err: &anyhow::Error) -> bool {
/// One unit of the conversation context window.
pub enum ConversationEntry {
/// An ordinary conversation message.
Message(Message),
/// A surfaced memory; entries sharing the same `key` are deduped
/// (last occurrence wins) during trimming.
Memory { key: String, message: Message },
/// DMN heartbeat/autonomous prompt — evicted aggressively during compaction.
Dmn(Message),
}
// Custom serde: serialize Memory with a "memory_key" field added to the message,
@ -161,7 +207,7 @@ impl Serialize for ConversationEntry {
fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
use serde::ser::SerializeMap;
match self {
Self::Message(m) => m.serialize(s),
Self::Message(m) | Self::Dmn(m) => m.serialize(s),
Self::Memory { key, message } => {
let json = serde_json::to_value(message).map_err(serde::ser::Error::custom)?;
let mut map = s.serialize_map(None)?;
@ -195,7 +241,7 @@ impl ConversationEntry {
/// Get the API message for sending to the model.
///
/// `Message` and `Dmn` wrap the message directly; `Memory` carries it
/// alongside its dedup key. Fix: the span contained both the pre-change
/// arm (`Self::Message(m) => m`) and its replacement, leaving the second
/// arm unreachable for `Message` — resolved to the post-change version,
/// matching the sibling accessors and the serialize impl.
pub fn api_message(&self) -> &Message {
    match self {
        // Dmn entries are sent like plain messages; only trimming
        // treats them specially.
        Self::Message(m) | Self::Dmn(m) => m,
        Self::Memory { message, .. } => message,
    }
}
@ -204,10 +250,14 @@ impl ConversationEntry {
matches!(self, Self::Memory { .. })
}
/// Whether this entry is an ephemeral DMN prompt (compaction evicts
/// these before anything else).
pub fn is_dmn(&self) -> bool {
    match self {
        Self::Dmn(_) => true,
        _ => false,
    }
}
/// Get a reference to the inner message.
///
/// Fix: the span contained both the pre-change arm (`Self::Message(m) => m`)
/// and its replacement, leaving the combined arm unreachable for `Message` —
/// resolved to the post-change version, matching `api_message`/`message_mut`.
pub fn message(&self) -> &Message {
    match self {
        Self::Message(m) | Self::Dmn(m) => m,
        Self::Memory { message, .. } => message,
    }
}
@ -215,7 +265,7 @@ impl ConversationEntry {
/// Get a mutable reference to the inner message.
///
/// Fix: the span contained both the pre-change arm (`Self::Message(m) => m`)
/// and its replacement, leaving the combined arm unreachable for `Message` —
/// resolved to the post-change version, matching `api_message`/`message`.
pub fn message_mut(&mut self) -> &mut Message {
    match self {
        Self::Message(m) | Self::Dmn(m) => m,
        Self::Memory { message, .. } => message,
    }
}
@ -232,6 +282,16 @@ pub struct ContextState {
pub entries: Vec<ConversationEntry>,
}
/// Render journal entries into a single text block for the context window.
///
/// Returns an empty string when there are no entries; otherwise a
/// "[Earlier — from your journal]" header followed by one
/// `## <timestamp>` section per entry.
pub fn render_journal(entries: &[JournalEntry]) -> String {
    use std::fmt::Write;

    if entries.is_empty() {
        return String::new();
    }
    entries.iter().fold(
        String::from("[Earlier — from your journal]\n\n"),
        |mut out, entry| {
            // writeln! onto a String cannot fail; .ok() discards the Result
            // to keep the fold body an expression.
            writeln!(
                out,
                "## {}\n{}\n",
                entry.timestamp.format("%Y-%m-%dT%H:%M"),
                entry.content
            )
            .ok();
            out
        },
    )
}
impl ContextState {
pub fn render_context_message(&self) -> String {
let mut parts: Vec<String> = self.personality.iter()