WIP: trim_entries dedup, context_window rename, compact simplification

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-02 15:58:03 -04:00
parent 809679b6ce
commit d419587c1b
3 changed files with 58 additions and 53 deletions

View file

@ -15,27 +15,49 @@ pub struct JournalEntry {
pub content: String,
}
/// Look up a model's context window size in tokens.
pub fn model_context_window(_model: &str) -> usize {
/// Context window size in tokens, as configured via `api_context_window`.
pub fn context_window() -> usize {
    let cfg = crate::config::get();
    cfg.api_context_window
}
/// Context budget in tokens: 60% of the model's context window.
fn context_budget_tokens(model: &str) -> usize {
model_context_window(model) * 60 / 100
fn context_budget_tokens() -> usize {
    // 60% of the configured window, rounded down by integer division.
    let window = context_window();
    window * 60 / 100
}
/// Trim conversation to fit within the context budget.
/// Returns the trimmed conversation messages (oldest dropped first).
pub fn trim_conversation(
/// Dedup and trim conversation entries to fit within the context budget.
///
/// 1. Dedup: if the same memory key appears multiple times, keep only
/// the latest render (drop the earlier Memory entry and its
/// corresponding assistant tool_call message).
/// 2. Trim: drop oldest entries until the conversation fits, snapping
/// to user message boundaries.
pub fn trim_entries(
context: &ContextState,
conversation: &[Message],
model: &str,
entries: &[ConversationEntry],
tokenizer: &CoreBPE,
) -> Vec<Message> {
) -> Vec<ConversationEntry> {
// Token counter: BPE-encode (special tokens included) and take the length.
let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
let max_tokens = context_budget_tokens(model);
// --- Phase 1: dedup memory entries by key (keep last) ---
// NOTE(review): the doc above says the earlier entry's corresponding
// assistant tool_call message is dropped too, but this loop only records
// the earlier Memory entry's own index in drop_indices — confirm whether
// the paired assistant message also needs to be dropped.
let mut seen_keys: std::collections::HashMap<&str, usize> = std::collections::HashMap::new();
let mut drop_indices: std::collections::HashSet<usize> = std::collections::HashSet::new();
for (i, entry) in entries.iter().enumerate() {
if let ConversationEntry::Memory { key, .. } = entry {
// HashMap::insert returns the previously stored index for an occupied
// key, so the LAST occurrence of each key is the one that survives.
if let Some(prev) = seen_keys.insert(key.as_str(), i) {
drop_indices.insert(prev);
}
}
}
// NOTE(review): this clones every surviving entry, and the kept tail is
// cloned again by to_vec() below — a second pass could avoid one clone.
let deduped: Vec<ConversationEntry> = entries.iter().enumerate()
.filter(|(i, _)| !drop_indices.contains(i))
.map(|(_, e)| e.clone())
.collect();
// --- Phase 2: trim to fit context budget ---
let max_tokens = context_budget_tokens();
// Fixed overheads charged against the budget: system prompt + personality
// sections, plus every journal entry's content.
let identity_cost = count(&context.system_prompt)
+ context.personality.iter().map(|(_, c)| count(c)).sum::<usize>();
let journal_cost: usize = context.journal.iter().map(|e| count(&e.content)).sum();
// (diff hunk boundary: a few unchanged lines are omitted from this view)
@ -45,23 +67,23 @@ pub fn trim_conversation(
.saturating_sub(journal_cost)
.saturating_sub(reserve);
let msg_costs: Vec<usize> = conversation.iter()
.map(|m| msg_token_count(tokenizer, m)).collect();
// Per-entry token costs; assumes ConversationEntry::message() exposes the
// wrapped chat Message — TODO confirm against the entry type's definition.
let msg_costs: Vec<usize> = deduped.iter()
.map(|e| msg_token_count(tokenizer, e.message())).collect();
let total: usize = msg_costs.iter().sum();
let mut skip = 0;
let mut trimmed = total;
// Drop whole oldest entries until the remaining total fits the budget.
while trimmed > available && skip < conversation.len() {
while trimmed > available && skip < deduped.len() {
trimmed -= msg_costs[skip];
skip += 1;
}
// Walk forward to user message boundary
// Snap forward to the next user turn so the kept tail never starts
// mid-exchange (e.g. with a dangling assistant/tool message).
while skip < conversation.len() && conversation[skip].role != Role::User {
while skip < deduped.len() && deduped[skip].message().role != Role::User {
skip += 1;
}
conversation[skip..].to_vec()
deduped[skip..].to_vec()
}
/// Count the token footprint of a message using BPE tokenization.