Kill publish_context_state() — screens lock the agent directly

F1 and F2 screens now call agent.context_state_summary() directly
via try_lock/lock instead of reading from a shared RwLock cache.
Removes SharedContextState, publish_context_state(), and
publish_context_state_with_scores().

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-07 03:03:24 -04:00
parent 48c843234d
commit 04e260c081
6 changed files with 30 additions and 64 deletions

View file

@ -28,7 +28,7 @@ use context::{ConversationEntry, ContextState};
use tools::{summarize_args, working_stack};
use crate::mind::log::ConversationLog;
use crate::agent::context::{ContextSection, SharedContextState};
use crate::agent::context::ContextSection;
use crate::subconscious::learn;
// --- Activity tracking (RAII guards) ---
@ -166,8 +166,6 @@ pub struct Agent {
tokenizer: CoreBPE,
/// Mutable context state — personality, working stack, etc.
pub context: ContextState,
/// Shared live context summary — TUI reads this directly for debug screen.
pub shared_context: SharedContextState,
/// App config — used to reload identity on compaction and model switching.
pub app_config: crate::config::AppConfig,
pub prompt_file: String,
@ -193,7 +191,6 @@ impl Agent {
app_config: crate::config::AppConfig,
prompt_file: String,
conversation_log: Option<ConversationLog>,
shared_context: SharedContextState,
active_tools: tools::SharedActiveTools,
) -> Self {
let tokenizer = tiktoken_rs::cl100k_base()
@ -223,7 +220,6 @@ impl Agent {
conversation_log,
tokenizer,
context,
shared_context,
app_config,
prompt_file,
session_id,
@ -236,7 +232,6 @@ impl Agent {
agent.load_startup_journal();
agent.load_working_stack();
agent.publish_context_state();
agent
}
@ -265,7 +260,6 @@ impl Agent {
conversation_log: None,
tokenizer,
context: self.context.clone(),
shared_context: context::shared_context_state(),
app_config: self.app_config.clone(),
prompt_file: self.prompt_file.clone(),
session_id: self.session_id.clone(),
@ -490,7 +484,6 @@ impl Agent {
if let Some(usage) = &usage {
me.last_prompt_tokens = usage.prompt_tokens;
me.publish_context_state();
}
// Empty response — nudge and retry
@ -542,7 +535,6 @@ impl Agent {
for (call, output) in results {
me.apply_tool_result(&call, output, &mut ds);
}
me.publish_context_state();
continue;
}
@ -657,7 +649,6 @@ impl Agent {
let mut msg = Message::tool_result(&call.id, &output);
msg.stamp();
self.push_entry(ConversationEntry::Memory { key: key.to_string(), message: msg });
self.publish_context_state();
return;
}
}
@ -910,25 +901,6 @@ impl Agent {
}
}
/// Push the current context summary to the shared state for the TUI to read.
///
/// Convenience wrapper: identical to calling
/// `publish_context_state_with_scores(None)` — publishes the summary with
/// no memory-score annotations attached.
pub fn publish_context_state(&self) {
self.publish_context_state_with_scores(None);
}
/// Build the current context summary (optionally annotated with
/// `memory_scores`), append a per-section trace to a debug log file, and
/// overwrite the shared context state that the TUI debug screen reads.
///
/// NOTE(review): `self.shared_context` is presumably an `RwLock` (the
/// `.write()` / `Ok(..)` pattern below and the commit message's "shared
/// RwLock cache" both suggest it) — confirm against `SharedContextState`'s
/// definition.
pub fn publish_context_state_with_scores(&self, memory_scores: Option<&learn::MemoryScore>) {
let summary = self.context_state_summary(memory_scores);
// Best-effort debug trace: open (creating if needed) in append mode and
// log each section's name, token count, and child count. Open/write
// failures are deliberately ignored — debugging must never break publish.
if let Ok(mut dbg) = std::fs::OpenOptions::new().create(true).append(true)
.open("/tmp/poc-journal-debug.log") {
use std::io::Write;
for s in &summary {
let _ = writeln!(dbg, "[publish] {} ({} tokens, {} children)", s.name, s.tokens, s.children.len());
}
}
// Publish: replace the shared summary wholesale. If the write guard is
// unavailable (e.g. poisoned lock), skip silently rather than panic.
if let Ok(mut state) = self.shared_context.write() {
*state = summary;
}
}
/// Replace base64 image data in older messages with text placeholders.
/// Keeps the 2 most recent images live (enough for motion/comparison).
/// The tool result message before each image records what was loaded.
@ -1013,9 +985,8 @@ impl Agent {
before, after, before_mem, after_mem, before_conv, after_conv);
self.generation += 1;
self.last_prompt_tokens = 0;
self.publish_context_state();
let sections = self.shared_context.read().map(|s| s.clone()).unwrap_or_default();
let sections = self.context_state_summary(None);
dbglog!("[compact] budget: {}", context::sections_budget_string(&sections));
}
@ -1043,8 +1014,8 @@ impl Agent {
self.context.entries = all;
self.compact();
// Estimate prompt tokens from sections so status bar isn't 0 on startup
let sections = self.shared_context.read().map(|s| s.clone()).unwrap_or_default();
self.last_prompt_tokens = context::sections_used(&sections) as u32;
self.last_prompt_tokens = context::sections_used(
&self.context_state_summary(None)) as u32;
true
}