Remove poc-memory daemon and RPC infrastructure

The background daemon and its job orchestration are redundant now that
the consciousness binary handles everything directly. Gut daemon.rs
down to just GraphHealth + compute_graph_health (used by the F4 TUI
screen), remove the DaemonCmd CLI subcommand, strip daemon RPC
fast-paths from cli/agent.rs, and drop the jobkit dependency.

-1330 lines.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
ProofOfConcept 2026-04-09 20:07:05 -04:00
parent e6c7b82a0f
commit aad0cd669a
6 changed files with 29 additions and 1359 deletions

View file

@@ -2,7 +2,7 @@
use crate::store;
pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option<&str>, dry_run: bool, local: bool, state_dir: Option<&str>) -> Result<(), String> {
pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option<&str>, dry_run: bool, _local: bool, state_dir: Option<&str>) -> Result<(), String> {
// Mark as agent so tool calls (e.g. poc-memory render) don't
// pollute the user's seen set as a side effect
// SAFETY: single-threaded at this point (CLI startup, before any agent work)
@@ -18,18 +18,6 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option
unsafe { std::env::set_var("POC_MEMORY_DRY_RUN", "1"); }
}
let needs_local = local || dry_run;
let has_targets = !target.is_empty() || query.is_some();
// Fast path: no explicit targets, daemon available — just queue via RPC
if !needs_local && !has_targets {
if crate::agents::daemon::send_rpc_pub("ping").is_some() {
return crate::agents::daemon::rpc_run_agent(agent, count);
}
println!("Daemon not running — falling back to local execution");
}
// Slow path: need the store for local execution or target resolution
let mut store = store::Store::load()?;
// Resolve targets: explicit --target, --query, or agent's default query
@@ -50,32 +38,15 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option
};
if !resolved_targets.is_empty() {
// --local or daemon unavailable: run directly
if needs_local || crate::agents::daemon::send_rpc_pub("ping").is_none() {
if !needs_local {
println!("Daemon not running — falling back to local execution");
}
for (i, key) in resolved_targets.iter().enumerate() {
println!("[{}] [{}/{}] {}", agent, i + 1, resolved_targets.len(), key);
if i > 0 { store = store::Store::load()?; }
if let Err(e) = crate::agent::oneshot::run_one_agent(
&mut store, agent, count, Some(&[key.clone()]),
) {
println!("[{}] ERROR on {}: {}", agent, key, e);
}
}
return Ok(());
}
// Queue to daemon
let mut queued = 0;
for key in &resolved_targets {
let cmd = format!("run-agent {} 1 target:{}", agent, key);
if crate::agents::daemon::send_rpc_pub(&cmd).is_some() {
queued += 1;
for (i, key) in resolved_targets.iter().enumerate() {
println!("[{}] [{}/{}] {}", agent, i + 1, resolved_targets.len(), key);
if i > 0 { store = store::Store::load()?; }
if let Err(e) = crate::agent::oneshot::run_one_agent(
&mut store, agent, count, Some(&[key.clone()]),
) {
println!("[{}] ERROR on {}: {}", agent, key, e);
}
}
println!("[{}] queued {} tasks to daemon", agent, queued);
} else {
// Local execution (--local, --debug, dry-run, or daemon unavailable)
crate::agent::oneshot::run_one_agent(

View file

@@ -80,5 +80,5 @@ pub use hippocampus::query::parser as query_parser;
pub use subconscious as agents;
pub use subconscious::{
audit, consolidate,
digest, daemon,
digest,
};

View file

@@ -439,45 +439,8 @@ enum GraphCmd {
},
}
#[derive(Subcommand)]
enum DaemonCmd {
/// Start the daemon (default)
Start,
/// Show daemon status
Status,
/// Show daemon log
Log {
/// Job name to filter by
job: Option<String>,
/// Tail a task's log file (drill down from daemon log)
#[arg(long)]
task: Option<String>,
/// Number of lines to show
#[arg(long, default_value_t = 20)]
lines: usize,
},
/// Trigger consolidation via daemon
Consolidate,
/// Run an agent via the daemon
Run {
/// Agent name (e.g. organize, replay, linker)
#[arg(default_value = "replay")]
agent: String,
/// Batch size
#[arg(default_value_t = 1)]
count: usize,
},
/// Interactive TUI
Tui,
/// Reload config file without restarting
ReloadConfig,
}
#[derive(Subcommand)]
enum AgentCmd {
/// Background job daemon
#[command(subcommand)]
Daemon(DaemonCmd),
/// Run knowledge agents to convergence
#[command(name = "knowledge-loop")]
KnowledgeLoop {
@@ -859,35 +822,9 @@ impl Run for CursorCmd {
}
}
impl Run for DaemonCmd {
fn run(self) -> Result<(), String> {
match self {
Self::Start => daemon::run_daemon(),
Self::Status => daemon::show_status(),
Self::Log { job, task, lines } => {
if let Some(ref task_name) = task {
daemon::show_task_log(task_name, lines)
} else {
daemon::show_log(job.as_deref(), lines)
}
}
Self::Consolidate => daemon::rpc_consolidate(),
Self::Run { agent, count } => daemon::rpc_run_agent(&agent, count),
Self::Tui => Err("TUI moved to consciousness binary (F4/F5)".into()),
Self::ReloadConfig => {
match daemon::send_rpc_pub("reload-config") {
Some(resp) => { eprintln!("{}", resp.trim()); Ok(()) }
None => Err("daemon not running".into()),
}
}
}
}
}
impl Run for AgentCmd {
fn run(self) -> Result<(), String> {
match self {
Self::Daemon(sub) => sub.run(),
Self::KnowledgeLoop { max_cycles, batch_size, window, max_depth }
=> cli::agent::cmd_knowledge_loop(max_cycles, batch_size, window, max_depth),
Self::ConsolidateBatch { count, auto, agent }

File diff suppressed because it is too large Load diff