Compare commits

..

5 commits

Author SHA1 Message Date
ProofOfConcept
15f3be27ce Show MCP server failures in the UI instead of debug log
MCP server spawn failures were going to dbglog where the user
wouldn't see them. Route through the agent's notify so they appear
on the status bar.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-09 22:46:48 -04:00
ProofOfConcept
3e0d52c451 Redirect noisy warnings to debug log to stop TUI corruption
Duplicate key warnings fire on every store load and were writing to
stderr, corrupting the TUI display. Log write warnings and MCP
server failures are similarly routine. Route these to dbglog.

Serious errors (rkyv snapshot failures, store corruption) remain on
stderr — those are real problems the user needs to see.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-09 22:46:48 -04:00
ProofOfConcept
c31d531954 Fix status bar timer: use activity start time, tick every 1s
The status bar timer was showing turn/call elapsed times (0s, 0/60s)
instead of the activity's actual elapsed time. Use activity_started
from the ActivityEntry directly.

Add a 1s tick to the UI select loop when an activity is active so
the timer updates live.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-09 22:36:45 -04:00
ProofOfConcept
5fe22a5f23 Use ActivityGuard for context overflow retry progress
Instead of two separate notifications piling up on the status bar,
use a single ActivityGuard that updates in place during overflow
retries and auto-completes when the turn finishes.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-09 22:32:38 -04:00
ProofOfConcept
121b46e1d2 Add ActivityGuard::update() for in-place progress updates
Lets long-running operations update their status bar message without
creating/dropping a new activity per iteration. Useful for loops
like memory scoring where you want "scoring: 3/25 keyname" updating
in place.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-09 22:18:43 -04:00
7 changed files with 50 additions and 15 deletions

View file

@@ -801,7 +801,7 @@ impl ContextState {
pub fn push_log(&mut self, section: Section, node: AstNode) { pub fn push_log(&mut self, section: Section, node: AstNode) {
if let Some(ref log) = self.conversation_log { if let Some(ref log) = self.conversation_log {
if let Err(e) = log.append_node(&node) { if let Err(e) = log.append_node(&node) {
eprintln!("warning: log: {:#}", e); dbglog!("warning: log: {:#}", e);
} }
} }
self.section_mut(section).push(node); self.section_mut(section).push(node);

View file

@@ -42,6 +42,17 @@ pub struct ActivityGuard {
id: u64, id: u64,
} }
impl ActivityGuard {
/// Replace this activity's status-bar label in place, without
/// creating/dropping a new activity entry.
///
/// Locks the owning agent's state, finds the activity entry whose id
/// matches `self.id`, and overwrites its `label`, then signals the
/// state's `changed` notifier so observers (the UI) can re-render.
///
/// NOTE(review): `changed.notify_one()` fires even when no entry with
/// `self.id` exists — presumably a harmless spurious wakeup; confirm.
pub async fn update(&self, label: impl Into<String>) {
// Convert before taking the lock to keep the critical section short.
let label = label.into();
let mut st = self.agent.state.lock().await;
if let Some(entry) = st.activities.iter_mut().find(|a| a.id == self.id) {
entry.label = label;
}
st.changed.notify_one();
}
}
const ACTIVITY_LINGER: std::time::Duration = std::time::Duration::from_secs(5); const ACTIVITY_LINGER: std::time::Duration = std::time::Duration::from_secs(5);
impl Drop for ActivityGuard { impl Drop for ActivityGuard {
@@ -307,6 +318,7 @@ impl Agent {
} }
let mut overflow_retries: u32 = 0; let mut overflow_retries: u32 = 0;
let mut overflow_activity: Option<ActivityGuard> = None;
let mut empty_retries: u32 = 0; let mut empty_retries: u32 = 0;
let mut ds = DispatchState::new(); let mut ds = DispatchState::new();
@@ -371,8 +383,12 @@ impl Agent {
} }
if overflow_retries < 2 { if overflow_retries < 2 {
overflow_retries += 1; overflow_retries += 1;
agent.state.lock().await.notify( let msg = format!("context overflow — compacting ({}/2)", overflow_retries);
format!("context overflow — retrying ({}/2)", overflow_retries)); match &overflow_activity {
Some(a) => a.update(&msg).await,
None => overflow_activity = Some(
start_activity(&agent, &msg).await),
}
agent.compact().await; agent.compact().await;
continue; continue;
} }
@@ -387,7 +403,7 @@ impl Agent {
if let Some(ref log) = ctx.conversation_log { if let Some(ref log) = ctx.conversation_log {
let node = &ctx.conversation()[branch_idx]; let node = &ctx.conversation()[branch_idx];
if let Err(e) = log.append_node(node) { if let Err(e) = log.append_node(node) {
eprintln!("warning: log: {:#}", e); dbglog!("warning: log: {:#}", e);
} }
} }
} }
@@ -562,7 +578,7 @@ impl Agent {
} }
} }
Err(e) => { Err(e) => {
eprintln!("warning: failed to reload identity: {:#}", e); dbglog!("warning: failed to reload identity: {:#}", e);
} }
} }

View file

@@ -140,7 +140,7 @@ fn registry() -> &'static TokioMutex<Registry> {
}) })
} }
async fn ensure_init() -> Result<()> { async fn ensure_init(agent: Option<&std::sync::Arc<super::super::Agent>>) -> Result<()> {
let mut reg = registry().lock().await; let mut reg = registry().lock().await;
if !reg.servers.is_empty() { return Ok(()); } if !reg.servers.is_empty() { return Ok(()); }
let configs = crate::config::get().mcp_servers.clone(); let configs = crate::config::get().mcp_servers.clone();
@@ -148,14 +148,24 @@ async fn ensure_init() -> Result<()> {
let args: Vec<&str> = cfg.args.iter().map(|s| s.as_str()).collect(); let args: Vec<&str> = cfg.args.iter().map(|s| s.as_str()).collect();
match McpServer::spawn(&cfg.name, &cfg.command, &args).await { match McpServer::spawn(&cfg.name, &cfg.command, &args).await {
Ok(server) => reg.servers.push(server), Ok(server) => reg.servers.push(server),
Err(e) => eprintln!("warning: MCP server {} failed: {:#}", cfg.name, e), Err(e) => {
let msg = format!("MCP server {} failed: {:#}", cfg.name, e);
dbglog!("{}", msg);
if let Some(a) = agent {
if let Ok(mut st) = a.state.try_lock() {
st.notify(msg);
}
}
}
} }
} }
Ok(()) Ok(())
} }
pub(super) async fn call_tool(name: &str, args: &serde_json::Value) -> Result<String> { pub(super) async fn call_tool(name: &str, args: &serde_json::Value,
ensure_init().await?; agent: Option<&std::sync::Arc<super::super::Agent>>,
) -> Result<String> {
ensure_init(agent).await?;
let mut reg = registry().lock().await; let mut reg = registry().lock().await;
let server = reg.servers.iter_mut() let server = reg.servers.iter_mut()
.find(|s| s.tools.iter().any(|t| t.name == name)) .find(|s| s.tools.iter().any(|t| t.name == name))
@@ -178,7 +188,7 @@ pub(super) async fn call_tool(name: &str, args: &serde_json::Value) -> Result<St
} }
pub(super) async fn tool_definitions_json() -> Vec<String> { pub(super) async fn tool_definitions_json() -> Vec<String> {
let _ = ensure_init().await; let _ = ensure_init(None).await;
let reg = registry().lock().await; let reg = registry().lock().await;
reg.servers.iter() reg.servers.iter()
.flat_map(|s| s.tools.iter()) .flat_map(|s| s.tools.iter())

View file

@@ -164,7 +164,7 @@ pub async fn dispatch_with_agent(
None => true, None => true,
}; };
if allowed { if allowed {
if let Ok(result) = mcp_client::call_tool(name, args).await { if let Ok(result) = mcp_client::call_tool(name, args, agent.as_ref()).await {
return result; return result;
} }
} }

View file

@@ -200,7 +200,7 @@ impl Store {
// Report duplicate keys // Report duplicate keys
for (key, uuids) in &key_uuids { for (key, uuids) in &key_uuids {
if uuids.len() > 1 { if uuids.len() > 1 {
eprintln!("WARNING: key '{}' has {} UUIDs (duplicate nodes)", key, uuids.len()); dbglog!("WARNING: key '{}' has {} UUIDs (duplicate nodes)", key, uuids.len());
} }
} }

View file

@@ -902,9 +902,8 @@ impl InteractScreen {
// Draw status bar with live activity indicator // Draw status bar with live activity indicator
let timer = if !app.activity.is_empty() { let timer = if !app.activity.is_empty() {
let total = self.turn_started.map(|t| t.elapsed().as_secs()).unwrap_or(0); let elapsed = app.activity_started.map(|t| t.elapsed().as_secs()).unwrap_or(0);
let call = self.call_started.map(|t| t.elapsed().as_secs()).unwrap_or(0); format!(" {}s", elapsed)
format!(" {}s, {}/{}s", total, call, self.call_timeout_secs)
} else { } else {
String::new() String::new()
}; };
@@ -1057,6 +1056,8 @@ impl ScreenView for InteractScreen {
app.activity = st.activities.last() app.activity = st.activities.last()
.map(|a| a.label.clone()) .map(|a| a.label.clone())
.unwrap_or_default(); .unwrap_or_default();
app.activity_started = st.activities.last()
.map(|a| a.started);
} }
if let Ok(ctx) = self.agent.context.try_lock() { if let Ok(ctx) = self.agent.context.try_lock() {
let window = crate::agent::context::context_window(); let window = crate::agent::context::context_window();

View file

@@ -358,7 +358,11 @@ async fn run(
let mut startup_done = false; let mut startup_done = false;
let mut dirty = true; // render on first loop let mut dirty = true; // render on first loop
let mut activity_tick = tokio::time::interval(std::time::Duration::from_secs(1));
activity_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
loop { loop {
let has_activity = !app.activity.is_empty();
tokio::select! { tokio::select! {
biased; biased;
@@ -380,6 +384,10 @@ async fn run(
Some(channels) = channel_rx.recv() => { Some(channels) = channel_rx.recv() => {
app.set_channel_status(channels); app.set_channel_status(channels);
} }
_ = activity_tick.tick(), if has_activity => {
dirty = true;
}
} }
// State sync on every wake // State sync on every wake