move context functions from agent/context.rs to thought/context.rs

trim_conversation moved to thought/context.rs where model_context_window,
msg_token_count, is_context_overflow, is_stream_error already lived.
Delete the duplicate agent/context.rs (94 lines).

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-02 15:28:00 -04:00
parent 01bfbc0dad
commit 214806cb90
5 changed files with 48 additions and 104 deletions

View file

@ -423,6 +423,45 @@ pub fn is_stream_error(err: &anyhow::Error) -> bool {
err.to_string().contains("model stream error")
}
/// Trim conversation to fit within the context budget.
///
/// The token cost of the fixed context (system prompt, personality entries,
/// journal) plus a one-quarter reserve is subtracted from the model's budget
/// (`context_budget_tokens`); whatever remains is available for conversation
/// history. Oldest messages are dropped first until the tail fits, then the
/// cut is advanced to the next user-message boundary so the surviving history
/// never opens mid-exchange (e.g. with a dangling assistant/tool message).
///
/// Returns the trimmed conversation messages (oldest dropped first).
pub fn trim_conversation(
    context: &ContextState,
    conversation: &[Message],
    model: &str,
    tokenizer: &CoreBPE,
) -> Vec<Message> {
    let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
    let max_tokens = context_budget_tokens(model);

    // Fixed overhead that ships with every request regardless of history.
    let identity_cost = count(&context.system_prompt)
        + context.personality.iter().map(|(_, c)| count(c)).sum::<usize>();
    let journal_cost: usize = context.journal.iter().map(|e| count(&e.content)).sum();

    // Keep a quarter of the window free — presumably for the model's
    // response; TODO confirm against how the budget is consumed downstream.
    let reserve = max_tokens / 4;
    // saturating_sub: a huge fixed context must clamp to 0, not underflow.
    let available = max_tokens
        .saturating_sub(identity_cost)
        .saturating_sub(journal_cost)
        .saturating_sub(reserve);

    let msg_costs: Vec<usize> = conversation
        .iter()
        .map(|m| msg_token_count(tokenizer, m))
        .collect();

    // Drop from the front (oldest first) until the remaining tail fits.
    let mut skip = 0;
    let mut remaining: usize = msg_costs.iter().sum();
    while remaining > available && skip < conversation.len() {
        remaining -= msg_costs[skip];
        skip += 1;
    }

    // Only realign to a user-message boundary when something was actually
    // dropped: the previous unconditional walk discarded leading non-user
    // messages even when the whole conversation already fit the budget.
    if skip > 0 {
        while skip < conversation.len() && conversation[skip].role != Role::User {
            skip += 1;
        }
    }

    conversation[skip..].to_vec()
}
fn parse_msg_timestamp(msg: &Message) -> Option<DateTime<Utc>> {
msg.timestamp
.as_ref()