Kill tiktoken — all token counting now uses Qwen 3.5 tokenizer

Remove tiktoken-rs dependency, CoreBPE field on Agent, and the
msg_token_count() function. All tokenization now goes through the
global HuggingFace tokenizer in agent/tokenizer.rs.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-08 11:25:28 -04:00
parent 5e4067c04f
commit 67e3228c32
4 changed files with 1 addition and 78 deletions

View file

@ -21,7 +21,6 @@ pub mod tools;
use std::sync::Arc;
use anyhow::Result;
use tiktoken_rs::CoreBPE;
use api::{ApiClient, ToolCall};
use api::{ContentPart, Message, MessageContent, Role};
@ -163,9 +162,6 @@ pub struct Agent {
pub provenance: String,
/// Persistent conversation log — append-only record of all messages.
pub conversation_log: Option<ConversationLog>,
/// BPE tokenizer for token counting (cl100k_base — close enough
/// for Claude and Qwen budget allocation, ~85-90% count accuracy).
tokenizer: CoreBPE,
/// Mutable context state — personality, working stack, etc.
pub context: ContextState,
/// App config — used to reload identity on compaction and model switching.
@ -193,9 +189,6 @@ impl Agent {
conversation_log: Option<ConversationLog>,
active_tools: tools::SharedActiveTools,
) -> Self {
let tokenizer = tiktoken_rs::cl100k_base()
.expect("failed to load cl100k_base tokenizer");
let mut system = ContextSection::new("System prompt");
system.push(ContextEntry::new(
ConversationEntry::System(Message::system(&system_prompt)), None));
@ -227,7 +220,6 @@ impl Agent {
pending_dmn_pause: false,
provenance: "manual".to_string(),
conversation_log,
tokenizer,
context,
app_config,
prompt_file,
@ -249,8 +241,6 @@ impl Agent {
/// personality, journal, entries) for KV cache sharing. The caller
/// appends the subconscious prompt as a user message and runs the turn.
pub fn fork(&self, tools: Vec<tools::Tool>) -> Self {
let tokenizer = tiktoken_rs::cl100k_base()
.expect("failed to load cl100k_base tokenizer");
Self {
client: self.client.clone(),
@ -267,7 +257,6 @@ impl Agent {
pending_dmn_pause: false,
provenance: self.provenance.clone(),
conversation_log: None,
tokenizer,
context: self.context.clone(),
app_config: self.app_config.clone(),
prompt_file: self.prompt_file.clone(),