poc-memory status: add ratatui TUI dashboard

Per-agent-type tabs (health, replay, linker, separator, transfer,
apply, orphans, cap, digest, digest-links, knowledge) with dynamic
visibility — tabs only appear when tasks or log history exist.

Features:
- Overview tab: health gauges (α, gini, cc, episodic%), in-flight
  tasks, and recent log entries
- Pipeline tab: table with phase ordering and status
- Per-agent tabs: active tasks, output logs, log history
- Log tab: auto-scrolling daemon.log tail
- Vim-style count prefix: e.g. 5r runs 5 iterations of the agent
- Flash messages for RPC feedback
- Tab/Shift-Tab navigation, number keys for tab selection

Also adds run-agent RPC to the daemon: accepts agent type and
iteration count, spawns chained tasks with LLM resource pool.

poc-memory status launches TUI when stdout is a terminal and daemon
is running, falls back to text output otherwise.
This commit is contained in:
ProofOfConcept 2026-03-10 00:41:29 -04:00
parent 06df66cf4c
commit ef760f0053
6 changed files with 1247 additions and 2 deletions

View file

@@ -937,7 +937,7 @@ pub fn run_daemon() -> Result<(), String> {
let choir_main = Arc::clone(&choir);
let last_daily_main = Arc::clone(&last_daily);
let graph_health_main = Arc::clone(&graph_health);
status_socket_loop(&choir_main, &last_daily_main, &graph_health_main);
status_socket_loop(&choir_main, &last_daily_main, &graph_health_main, &llm);
log_event("daemon", "stopping", "");
eprintln!("Shutting down...");
@@ -994,9 +994,10 @@ fn status_sock_path() -> PathBuf {
/// Any connection gets the live status JSON written and closed.
/// Also handles SIGINT/SIGTERM for clean shutdown.
fn status_socket_loop(
choir: &Choir,
choir: &Arc<Choir>,
last_daily: &Arc<Mutex<Option<chrono::NaiveDate>>>,
graph_health: &Arc<Mutex<Option<GraphHealth>>>,
llm: &Arc<ResourcePool>,
) {
use std::io::{Read as _, Write as _};
use std::os::unix::net::UnixListener;
@@ -1047,6 +1048,44 @@ fn status_socket_loop(
let _ = stream.write_all(b"{\"ok\":true,\"action\":\"consolidation scheduled\"}\n");
log_event("rpc", "consolidate", "triggered via socket");
}
cmd if cmd.starts_with("run-agent ") => {
let parts: Vec<&str> = cmd.splitn(3, ' ').collect();
let agent_type = parts.get(1).unwrap_or(&"replay");
let count: usize = parts.get(2)
.and_then(|s| s.parse().ok())
.unwrap_or(1);
let batch_size = 5;
let today = chrono::Local::now().format("%Y-%m-%d");
let ts = chrono::Local::now().format("%H%M%S");
let mut prev = None;
let mut spawned = 0;
let mut remaining = count;
while remaining > 0 {
let batch = remaining.min(batch_size);
let agent = agent_type.to_string();
let task_name = format!("c-{}-rpc{}:{}", agent, ts, today);
let mut builder = choir.spawn(task_name)
.resource(llm)
.retries(1)
.init(move |ctx| {
job_consolidation_agent(ctx, &agent, batch)
});
if let Some(ref dep) = prev {
builder.depend_on(dep);
}
prev = Some(builder.run());
remaining -= batch;
spawned += 1;
}
let msg = format!("{{\"ok\":true,\"action\":\"queued {} {} run(s) ({} tasks)\"}}\n",
count, agent_type, spawned);
let _ = stream.write_all(msg.as_bytes());
log_event("rpc", "run-agent",
&format!("{} x{}", agent_type, count));
}
_ => {
// Default: return status
let status = build_status(choir, *last_daily.lock().unwrap(), graph_health);