Replace token counting with token generation via HuggingFace tokenizer

Add agent/tokenizer.rs with global Qwen 3.5 tokenizer that generates
actual token IDs including chat template wrapping. ContextEntry now
stores token_ids: Vec<u32> instead of tokens: usize — the count is
derived from the length.

ContextEntry::new() tokenizes automatically via the global tokenizer.
ContextSection::push_entry() takes a raw ConversationEntry and
tokenizes it. set_message() re-tokenizes without needing an external
tokenizer parameter.

Token IDs include the full chat template: <|im_start|>role\ncontent
<|im_end|>\n — so concatenating token_ids across entries produces a
ready-to-send prompt for vLLM's /v1/completions endpoint.

The old tiktoken CoreBPE is now unused on Agent (will be removed in
a followup). Token counts are now exact for Qwen 3.5 instead of the
~85-90% approximation from cl100k_base.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-08 11:20:03 -04:00
parent 70ee7abea5
commit 5e4067c04f
10 changed files with 540 additions and 97 deletions

View file

@ -33,12 +33,25 @@ pub enum ConversationEntry {
#[derive(Debug, Clone)]
pub struct ContextEntry {
    /// The underlying conversation entry this context slot wraps.
    pub entry: ConversationEntry,
    /// Cached tokenization — the actual token IDs for this entry's
    /// contribution to the prompt (including chat template wrapping).
    /// Empty for Log entries. The token count is derived from this
    /// vector's length (see `tokens()`), replacing the old cached
    /// `tokens: usize` count.
    pub token_ids: Vec<u32>,
    /// When this entry was added to the context.
    pub timestamp: Option<DateTime<Utc>>,
}
impl ContextEntry {
    /// Build an entry, caching its tokenization via the global tokenizer.
    pub fn new(entry: ConversationEntry, timestamp: Option<DateTime<Utc>>) -> Self {
        Self {
            token_ids: super::tokenizer::tokenize_conv_entry(&entry),
            entry,
            timestamp,
        }
    }

    /// Token count for this entry — simply the length of the cached IDs.
    pub fn tokens(&self) -> usize {
        self.token_ids.len()
    }
}
/// A named section of the context window with cached token total.
#[derive(Debug, Clone)]
pub struct ContextSection {
@ -58,32 +71,40 @@ impl ContextSection {
/// Number of entries currently held in this section.
pub fn len(&self) -> usize {
    self.entries.len()
}

/// Whether the section holds no entries at all.
pub fn is_empty(&self) -> bool {
    self.entries.is_empty()
}
/// Push a ConversationEntry, tokenizing it and updating the total.
pub fn push_entry(&mut self, entry: ConversationEntry, timestamp: Option<DateTime<Utc>>) {
    // Tokenization happens inside ContextEntry::new via the global tokenizer.
    let ce = ContextEntry::new(entry, timestamp);
    self.tokens += ce.tokens();
    self.entries.push(ce);
}
/// Push a pre-built ContextEntry (for restore, cloning, etc).
pub fn push(&mut self, entry: ContextEntry) {
    // Count is derived from the entry's cached token_ids length.
    self.tokens += entry.tokens();
    self.entries.push(entry);
}
/// Replace an entry at `index`, adjusting the token total.
///
/// Panics if `index` is out of bounds (same as direct indexing).
pub fn set(&mut self, index: usize, entry: ContextEntry) {
    // Subtract the outgoing entry's tokens, add the incoming one's.
    self.tokens -= self.entries[index].tokens();
    self.tokens += entry.tokens();
    self.entries[index] = entry;
}
/// Remove an entry at `index`, adjusting the token total.
///
/// Returns the removed entry. Panics if `index` is out of bounds
/// (same as `Vec::remove`).
pub fn del(&mut self, index: usize) -> ContextEntry {
    let removed = self.entries.remove(index);
    self.tokens -= removed.tokens();
    removed
}
/// Replace the message inside an entry, recomputing its token count.
pub fn set_message(&mut self, index: usize, tokenizer: &CoreBPE, msg: Message) {
let old_tokens = self.entries[index].tokens;
/// Replace the message inside an entry, re-tokenizing it.
pub fn set_message(&mut self, index: usize, msg: Message) {
let old_tokens = self.entries[index].tokens();
*self.entries[index].entry.message_mut() = msg;
let new_tokens = msg_token_count(tokenizer, self.entries[index].entry.api_message());
self.entries[index].tokens = new_tokens;
self.entries[index].token_ids = super::tokenizer::tokenize_conv_entry(
&self.entries[index].entry);
let new_tokens = self.entries[index].tokens();
self.tokens = self.tokens - old_tokens + new_tokens;
}
@ -96,7 +117,7 @@ impl ContextSection {
/// Bulk replace all entries, recomputing token total.
pub fn set_entries(&mut self, entries: Vec<ContextEntry>) {
    // Recompute from scratch — cheaper and safer than diffing totals.
    self.tokens = entries.iter().map(|e| e.tokens()).sum();
    self.entries = entries;
}
@ -104,7 +125,7 @@ impl ContextSection {
/// Trim this section's entries to fit the budget implied by
/// `fixed_tokens` (delegated to `trim_entries`), then recompute the
/// cached token total from the surviving entries.
pub fn trim(&mut self, fixed_tokens: usize) {
    self.entries = trim_entries(&self.entries, fixed_tokens);
    self.tokens = self.entries.iter().map(|e| e.tokens()).sum();
}
/// Clear all entries.
@ -189,9 +210,9 @@ fn trim_entries(entries: &[ContextEntry], fixed_tokens: usize) -> Vec<ContextEnt
.map(|(_, e)| e.clone())
.collect();
let entry_total = |r: &[ContextEntry]| -> usize { r.iter().map(|e| e.tokens).sum::<usize>() };
let entry_total = |r: &[ContextEntry]| -> usize { r.iter().map(|e| e.tokens()).sum::<usize>() };
let mem_total = |r: &[ContextEntry]| -> usize {
r.iter().filter(|e| e.entry.is_memory()).map(|e| e.tokens).sum()
r.iter().filter(|e| e.entry.is_memory()).map(|e| e.tokens()).sum()
};
dbglog!("[trim] max={} fixed={} total={} entries={}",