Compute ContextBudget on demand from typed sources

Remove cached context_budget field and measure_budget(). Budget
is computed on demand via budget() which calls
ContextState::budget(). Each bucket counted from its typed source.
Memory split from conversation by identifying memory tool calls.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-02 03:07:45 -04:00
parent acdfbeeac3
commit eb4dae04cb
3 changed files with 72 additions and 34 deletions

View file

@ -62,8 +62,6 @@ pub struct Agent {
pub reasoning_effort: String, pub reasoning_effort: String,
/// Persistent conversation log — append-only record of all messages. /// Persistent conversation log — append-only record of all messages.
conversation_log: Option<ConversationLog>, conversation_log: Option<ConversationLog>,
/// Current context window budget breakdown.
pub context_budget: ContextBudget,
/// BPE tokenizer for token counting (cl100k_base — close enough /// BPE tokenizer for token counting (cl100k_base — close enough
/// for Claude and Qwen budget allocation, ~85-90% count accuracy). /// for Claude and Qwen budget allocation, ~85-90% count accuracy).
tokenizer: CoreBPE, tokenizer: CoreBPE,
@ -116,7 +114,6 @@ impl Agent {
process_tracker: ProcessTracker::new(), process_tracker: ProcessTracker::new(),
reasoning_effort: "none".to_string(), reasoning_effort: "none".to_string(),
conversation_log, conversation_log,
context_budget: ContextBudget::default(),
tokenizer, tokenizer,
context, context,
shared_context, shared_context,
@ -126,7 +123,6 @@ impl Agent {
agent.load_startup_journal(); agent.load_startup_journal();
agent.load_working_stack(); agent.load_working_stack();
agent.measure_budget();
agent.publish_context_state(); agent.publish_context_state();
agent agent
} }
@ -183,28 +179,11 @@ impl Agent {
/// Push a context-only message (system prompt, identity context, /// Push a context-only message (system prompt, identity context,
/// journal summaries). Not logged — these are reconstructed on /// journal summaries). Not logged — these are reconstructed on
/// every startup/compaction. /// every startup/compaction.
/// Measure context window usage by category. Uses the BPE tokenizer pub fn budget(&self) -> ContextBudget {
/// for direct token counting (no chars/4 approximation). let count_str = |s: &str| self.tokenizer.encode_with_special_tokens(s).len();
fn measure_budget(&mut self) { let count_msg = |m: &Message| crate::agent::context::msg_token_count(&self.tokenizer, m);
let count = |s: &str| self.tokenizer.encode_with_special_tokens(s).len(); let window = crate::agent::context::model_context_window(&self.client.model);
self.context.budget(&count_str, &count_msg, window)
let id_tokens = count(&self.context.system_prompt)
+ self.context.personality.iter()
.map(|(_, content)| count(content)).sum::<usize>();
let jnl_tokens: usize = self.context.journal.iter()
.map(|e| count(&e.content)).sum();
let mem_tokens: usize = self.context.loaded_nodes.iter()
.map(|node| count(&node.render())).sum();
let conv_tokens: usize = self.context.messages.iter()
.map(|m| crate::agent::context::msg_token_count(&self.tokenizer, m)).sum();
self.context_budget = ContextBudget {
identity_tokens: id_tokens,
memory_tokens: mem_tokens,
journal_tokens: jnl_tokens,
conversation_tokens: conv_tokens,
window_tokens: crate::agent::context::model_context_window(&self.client.model),
};
} }
/// Send a user message and run the agent loop until the model /// Send a user message and run the agent loop until the model
@ -372,7 +351,7 @@ impl Agent {
if let Some(usage) = &usage { if let Some(usage) = &usage {
self.last_prompt_tokens = usage.prompt_tokens; self.last_prompt_tokens = usage.prompt_tokens;
self.measure_budget();
self.publish_context_state(); self.publish_context_state();
let _ = ui_tx.send(UiMessage::StatusUpdate(StatusInfo { let _ = ui_tx.send(UiMessage::StatusUpdate(StatusInfo {
dmn_state: String::new(), // filled by main loop dmn_state: String::new(), // filled by main loop
@ -382,7 +361,7 @@ impl Agent {
completion_tokens: usage.completion_tokens, completion_tokens: usage.completion_tokens,
model: self.client.model.clone(), model: self.client.model.clone(),
turn_tools: 0, // tracked by TUI from ToolCall messages turn_tools: 0, // tracked by TUI from ToolCall messages
context_budget: self.context_budget.status_string(), context_budget: self.budget().status_string(),
})); }));
} }
@ -547,7 +526,7 @@ impl Agent {
if output.text.starts_with("Error:") { if output.text.starts_with("Error:") {
ds.tool_errors += 1; ds.tool_errors += 1;
} }
self.measure_budget();
self.publish_context_state(); self.publish_context_state();
return; return;
} }
@ -826,7 +805,7 @@ impl Agent {
/// Called after any change to context state (working stack, etc). /// Called after any change to context state (working stack, etc).
fn refresh_context_state(&mut self) { fn refresh_context_state(&mut self) {
self.measure_budget();
self.publish_context_state(); self.publish_context_state();
self.save_working_stack(); self.save_working_stack();
} }
@ -849,8 +828,16 @@ impl Agent {
/// Push the current context summary to the shared state for the TUI to read. /// Push the current context summary to the shared state for the TUI to read.
fn publish_context_state(&self) { fn publish_context_state(&self) {
let summary = self.context_state_summary();
if let Ok(mut dbg) = std::fs::OpenOptions::new().create(true).append(true)
.open("/tmp/poc-journal-debug.log") {
use std::io::Write;
for s in &summary {
let _ = writeln!(dbg, "[publish] {} ({} tokens, {} children)", s.name, s.tokens, s.children.len());
}
}
if let Ok(mut state) = self.shared_context.write() { if let Ok(mut state) = self.shared_context.write() {
*state = self.context_state_summary(); *state = summary;
} }
} }
@ -978,7 +965,7 @@ impl Agent {
self.context.journal = journal::parse_journal_text(&journal); self.context.journal = journal::parse_journal_text(&journal);
self.context.messages = messages; self.context.messages = messages;
self.last_prompt_tokens = 0; self.last_prompt_tokens = 0;
self.measure_budget();
self.publish_context_state(); self.publish_context_state();
} }
@ -1041,7 +1028,7 @@ impl Agent {
self.context.messages = messages; self.context.messages = messages;
dbglog!("[restore] built context window: {} messages", self.context.messages.len()); dbglog!("[restore] built context window: {} messages", self.context.messages.len());
self.last_prompt_tokens = 0; self.last_prompt_tokens = 0;
self.measure_budget();
self.publish_context_state(); self.publish_context_state();
true true
} }

View file

@ -342,6 +342,57 @@ pub const WORKING_STACK_INSTRUCTIONS: &str = "/home/kent/.consciousness/config/w
pub const WORKING_STACK_FILE: &str = "/home/kent/.consciousness/working-stack.json"; pub const WORKING_STACK_FILE: &str = "/home/kent/.consciousness/working-stack.json";
impl ContextState { impl ContextState {
/// Compute the context budget from typed sources.
///
/// `count_str` measures raw text (system prompt, personality sections,
/// journal entries); `count_msg` measures whole conversation messages;
/// `window_tokens` is the model's total context window size.
pub fn budget(&self, count_str: &dyn Fn(&str) -> usize,
              count_msg: &dyn Fn(&Message) -> usize,
              window_tokens: usize) -> ContextBudget {
    // Identity bucket: system prompt plus every personality section.
    let mut identity_tokens = count_str(&self.system_prompt);
    for (_, content) in &self.personality {
        identity_tokens += count_str(content);
    }
    // Journal bucket: each entry's raw content.
    let mut journal_tokens = 0usize;
    for entry in &self.journal {
        journal_tokens += count_str(&entry.content);
    }
    // Memory vs. conversation are disentangled from the message stream.
    let (memory_tokens, conversation_tokens) = self.split_memory_conversation(count_msg);
    ContextBudget {
        identity_tokens,
        memory_tokens,
        journal_tokens,
        conversation_tokens,
        window_tokens,
    }
}
/// Split conversation messages into memory tool interactions and
/// everything else. Returns (memory_tokens, conversation_tokens).
///
/// A message is attributed to the memory bucket when it is either a tool
/// result answering a memory/journal tool call, or an assistant message
/// whose tool calls (at least one) all target memory/journal tools.
pub fn split_memory_conversation(&self, count: &dyn Fn(&Message) -> usize) -> (usize, usize) {
    // First pass: collect the ids of calls made to memory/journal tools.
    // Borrowed &str keys avoid cloning every id into the set.
    let mut memory_call_ids: std::collections::HashSet<&str> = std::collections::HashSet::new();
    for msg in &self.messages {
        if let Some(ref calls) = msg.tool_calls {
            for call in calls {
                if call.function.name.starts_with("memory_")
                    || call.function.name.starts_with("journal_") {
                    memory_call_ids.insert(call.id.as_str());
                }
            }
        }
    }
    // Second pass: attribute each message's tokens to exactly one bucket.
    let mut mem_tokens = 0;
    let mut conv_tokens = 0;
    for msg in &self.messages {
        let tokens = count(msg);
        let is_memory = match &msg.tool_call_id {
            // Tool result: memory iff it answers a memory tool call.
            Some(id) => memory_call_ids.contains(id.as_str()),
            // Assistant message: requires a non-empty tool_calls list where
            // every call is a memory tool. The !is_empty() guard matters:
            // `.all()` on an empty iterator is vacuously true, which would
            // misclassify a plain text message carrying Some(vec![]) as memory.
            None => msg.tool_calls.as_ref().map_or(false, |calls| {
                !calls.is_empty()
                    && calls.iter().all(|c| memory_call_ids.contains(c.id.as_str()))
            }),
        };
        if is_memory {
            mem_tokens += tokens;
        } else {
            conv_tokens += tokens;
        }
    }
    (mem_tokens, conv_tokens)
}
pub fn render_context_message(&self) -> String { pub fn render_context_message(&self) -> String {
let mut parts: Vec<String> = self.personality.iter() let mut parts: Vec<String> = self.personality.iter()
.map(|(name, content)| format!("## {}\n\n{}", name, content)) .map(|(name, content)| format!("## {}\n\n{}", name, content))

View file

@ -964,7 +964,7 @@ async fn run(cli: cli::CliArgs) -> Result<()> {
completion_tokens: 0, completion_tokens: 0,
model: agent_guard.model().to_string(), model: agent_guard.model().to_string(),
turn_tools: 0, turn_tools: 0,
context_budget: agent_guard.context_budget.status_string(), context_budget: agent_guard.budget().status_string(),
})); }));
} }