Kill tiktoken — all token counting now uses Qwen 3.5 tokenizer

Remove tiktoken-rs dependency, CoreBPE field on Agent, and the
msg_token_count() function. All tokenization now goes through the
global HuggingFace tokenizer in agent/tokenizer.rs.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-08 11:25:28 -04:00
parent 5e4067c04f
commit 67e3228c32
4 changed files with 1 addition and 78 deletions

View file

@@ -7,7 +7,6 @@
use crate::agent::api::*;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use tiktoken_rs::CoreBPE;
use crate::agent::tools::working_stack;
// --- Context state types ---
@@ -254,26 +253,6 @@ fn lowest_scored_memory(entries: &[ContextEntry]) -> Option<usize> {
.map(|(i, _)| i)
}
/// Count the token footprint of a message using BPE tokenization.
///
/// Sums the BPE token counts of the message's text content (plain text or
/// multi-part) plus, for each tool call, the function name and the raw
/// argument string. Messages with no content and no tool calls count as 0.
///
/// NOTE(review): image parts are charged a flat 85 tokens — presumably an
/// approximation of the provider's per-image cost (images are never run
/// through the BPE); confirm against the target API's pricing/token rules.
pub fn msg_token_count(tokenizer: &CoreBPE, msg: &Message) -> usize {
    // Token length of a string, counting special tokens as well.
    let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
    // Tokens contributed by the message body; 0 when `content` is None.
    let content = msg.content.as_ref().map_or(0, |c| match c {
        MessageContent::Text(s) => count(s),
        MessageContent::Parts(parts) => parts.iter()
            .map(|p| match p {
                ContentPart::Text { text } => count(text),
                // Flat per-image estimate — see NOTE above.
                ContentPart::ImageUrl { .. } => 85,
            })
            .sum(),
    });
    // Tokens contributed by tool calls: function name + JSON argument text.
    let tools = msg.tool_calls.as_ref().map_or(0, |calls| {
        calls.iter()
            .map(|c| count(&c.function.arguments) + count(&c.function.name))
            .sum()
    });
    content + tools
}
/// Detect context window overflow errors from the API.
pub fn is_context_overflow(err: &anyhow::Error) -> bool {
let msg = err.to_string().to_lowercase();

View file

@@ -21,7 +21,6 @@ pub mod tools;
use std::sync::Arc;
use anyhow::Result;
use tiktoken_rs::CoreBPE;
use api::{ApiClient, ToolCall};
use api::{ContentPart, Message, MessageContent, Role};
@@ -163,9 +162,6 @@ pub struct Agent {
pub provenance: String,
/// Persistent conversation log — append-only record of all messages.
pub conversation_log: Option<ConversationLog>,
/// BPE tokenizer for token counting (cl100k_base — close enough
/// for Claude and Qwen budget allocation, ~85-90% count accuracy).
tokenizer: CoreBPE,
/// Mutable context state — personality, working stack, etc.
pub context: ContextState,
/// App config — used to reload identity on compaction and model switching.
@@ -193,9 +189,6 @@ impl Agent {
conversation_log: Option<ConversationLog>,
active_tools: tools::SharedActiveTools,
) -> Self {
let tokenizer = tiktoken_rs::cl100k_base()
.expect("failed to load cl100k_base tokenizer");
let mut system = ContextSection::new("System prompt");
system.push(ContextEntry::new(
ConversationEntry::System(Message::system(&system_prompt)), None));
@@ -227,7 +220,6 @@ impl Agent {
pending_dmn_pause: false,
provenance: "manual".to_string(),
conversation_log,
tokenizer,
context,
app_config,
prompt_file,
@@ -249,8 +241,6 @@ impl Agent {
/// personality, journal, entries) for KV cache sharing. The caller
/// appends the subconscious prompt as a user message and runs the turn.
pub fn fork(&self, tools: Vec<tools::Tool>) -> Self {
let tokenizer = tiktoken_rs::cl100k_base()
.expect("failed to load cl100k_base tokenizer");
Self {
client: self.client.clone(),
@@ -267,7 +257,6 @@ impl Agent {
pending_dmn_pause: false,
provenance: self.provenance.clone(),
conversation_log: None,
tokenizer,
context: self.context.clone(),
app_config: self.app_config.clone(),
prompt_file: self.prompt_file.clone(),