Kill tiktoken — all token counting now uses Qwen 3.5 tokenizer
Remove the tiktoken-rs dependency, the CoreBPE field on Agent, and the msg_token_count() function. All tokenization now goes through the global HuggingFace tokenizer in agent/tokenizer.rs.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
parent
5e4067c04f
commit
67e3228c32
4 changed files with 1 addition and 78 deletions
|
|
@ -7,7 +7,6 @@
|
|||
use crate::agent::api::*;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tiktoken_rs::CoreBPE;
|
||||
use crate::agent::tools::working_stack;
|
||||
|
||||
// --- Context state types ---
|
||||
|
|
@ -254,26 +253,6 @@ fn lowest_scored_memory(entries: &[ContextEntry]) -> Option<usize> {
|
|||
.map(|(i, _)| i)
|
||||
}
|
||||
|
||||
/// Count the token footprint of a message using BPE tokenization.
|
||||
pub fn msg_token_count(tokenizer: &CoreBPE, msg: &Message) -> usize {
|
||||
let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
|
||||
let content = msg.content.as_ref().map_or(0, |c| match c {
|
||||
MessageContent::Text(s) => count(s),
|
||||
MessageContent::Parts(parts) => parts.iter()
|
||||
.map(|p| match p {
|
||||
ContentPart::Text { text } => count(text),
|
||||
ContentPart::ImageUrl { .. } => 85,
|
||||
})
|
||||
.sum(),
|
||||
});
|
||||
let tools = msg.tool_calls.as_ref().map_or(0, |calls| {
|
||||
calls.iter()
|
||||
.map(|c| count(&c.function.arguments) + count(&c.function.name))
|
||||
.sum()
|
||||
});
|
||||
content + tools
|
||||
}
|
||||
|
||||
/// Detect context window overflow errors from the API.
|
||||
pub fn is_context_overflow(err: &anyhow::Error) -> bool {
|
||||
let msg = err.to_string().to_lowercase();
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue