move LLM-dependent modules into agents/ subdir

Separate the agent layer (everything that calls external LLMs or
orchestrates sequences of such calls) from core graph infrastructure.

agents/: llm, prompts, audit, consolidate, knowledge, enrich,
         fact_mine, digest, daemon

Root: store/, graph, spectral, search, similarity, lookups, query,
      config, util, migrate, neuro/ (scoring + rewrite)

Re-exports at crate root preserve backwards compatibility so
`crate::llm`, `crate::digest` etc. continue to work.
This commit is contained in:
ProofOfConcept 2026-03-08 21:27:41 -04:00
parent 3dddc40841
commit cee9b76a7b
13 changed files with 68 additions and 46 deletions

View file

@@ -3,7 +3,7 @@
// Each batch of links gets reviewed by Sonnet, which returns per-link actions: // Each batch of links gets reviewed by Sonnet, which returns per-link actions:
// KEEP, DELETE, RETARGET, WEAKEN, STRENGTHEN. Batches run in parallel via rayon. // KEEP, DELETE, RETARGET, WEAKEN, STRENGTHEN. Batches run in parallel via rayon.
use crate::llm::call_sonnet; use super::llm::call_sonnet;
use crate::store::{self, Store, new_relation}; use crate::store::{self, Store, new_relation};
use std::collections::HashSet; use std::collections::HashSet;

View file

@@ -10,8 +10,8 @@
// //
// apply_consolidation() processes consolidation reports independently. // apply_consolidation() processes consolidation reports independently.
use crate::digest; use super::digest;
use crate::llm::{call_sonnet, parse_json_response}; use super::llm::{call_sonnet, parse_json_response};
use crate::neuro; use crate::neuro;
use crate::store::{self, Store, new_relation}; use crate::store::{self, Store, new_relation};
@@ -98,7 +98,7 @@ pub fn consolidate_full_with_progress(
*store = Store::load()?; *store = Store::load()?;
} }
let prompt = match neuro::agent_prompt(store, agent_type, *count) { let prompt = match super::prompts::agent_prompt(store, agent_type, *count) {
Ok(p) => p, Ok(p) => p,
Err(e) => { Err(e) => {
let msg = format!(" ERROR building prompt: {}", e); let msg = format!(" ERROR building prompt: {}", e);
@@ -266,7 +266,7 @@ fn build_consolidation_prompt(store: &Store, report_keys: &[String]) -> Result<S
"=".repeat(60), key, content)); "=".repeat(60), key, content));
} }
neuro::load_prompt("consolidation", &[("{{REPORTS}}", &report_text)]) super::prompts::load_prompt("consolidation", &[("{{REPORTS}}", &report_text)])
} }
/// Run the full apply-consolidation pipeline. /// Run the full apply-consolidation pipeline.

View file

@@ -103,7 +103,7 @@ fn job_experience_mine(ctx: &ExecutionContext, path: &str, segment: Option<usize
ctx.log_line("loading store"); ctx.log_line("loading store");
let mut store = crate::store::Store::load()?; let mut store = crate::store::Store::load()?;
ctx.log_line("mining"); ctx.log_line("mining");
let count = crate::enrich::experience_mine(&mut store, &path, segment)?; let count = super::enrich::experience_mine(&mut store, &path, segment)?;
ctx.log_line(format!("{count} entries mined")); ctx.log_line(format!("{count} entries mined"));
Ok(()) Ok(())
}) })
@@ -115,7 +115,7 @@ fn job_fact_mine(ctx: &ExecutionContext, path: &str) -> Result<(), TaskError> {
ctx.log_line("mining facts"); ctx.log_line("mining facts");
let p = std::path::Path::new(&path); let p = std::path::Path::new(&path);
let progress = |msg: &str| { ctx.set_progress(msg); }; let progress = |msg: &str| { ctx.set_progress(msg); };
let count = crate::fact_mine::mine_and_store(p, Some(&progress))?; let count = super::fact_mine::mine_and_store(p, Some(&progress))?;
ctx.log_line(format!("{count} facts stored")); ctx.log_line(format!("{count} facts stored"));
Ok(()) Ok(())
}) })
@@ -125,7 +125,7 @@ fn job_consolidate(ctx: &ExecutionContext) -> Result<(), TaskError> {
run_job(ctx, "consolidate", || { run_job(ctx, "consolidate", || {
ctx.log_line("loading store"); ctx.log_line("loading store");
let mut store = crate::store::Store::load()?; let mut store = crate::store::Store::load()?;
crate::consolidate::consolidate_full_with_progress(&mut store, &|msg| { super::consolidate::consolidate_full_with_progress(&mut store, &|msg| {
ctx.log_line(msg); ctx.log_line(msg);
}) })
}) })
@@ -133,13 +133,13 @@ fn job_consolidate(ctx: &ExecutionContext) -> Result<(), TaskError> {
fn job_knowledge_loop(ctx: &ExecutionContext) -> Result<(), TaskError> { fn job_knowledge_loop(ctx: &ExecutionContext) -> Result<(), TaskError> {
run_job(ctx, "knowledge-loop", || { run_job(ctx, "knowledge-loop", || {
let config = crate::knowledge::KnowledgeLoopConfig { let config = super::knowledge::KnowledgeLoopConfig {
max_cycles: 100, max_cycles: 100,
batch_size: 5, batch_size: 5,
..Default::default() ..Default::default()
}; };
ctx.log_line("running agents"); ctx.log_line("running agents");
let results = crate::knowledge::run_knowledge_loop(&config)?; let results = super::knowledge::run_knowledge_loop(&config)?;
ctx.log_line(format!("{} cycles, {} actions", ctx.log_line(format!("{} cycles, {} actions",
results.len(), results.len(),
results.iter().map(|r| r.total_applied).sum::<usize>())); results.iter().map(|r| r.total_applied).sum::<usize>()));
@@ -329,7 +329,7 @@ pub fn run_daemon() -> Result<(), String> {
let stale = find_stale_sessions(); let stale = find_stale_sessions();
// Load mined transcript keys once for this tick // Load mined transcript keys once for this tick
let mined = crate::enrich::mined_transcript_keys(); let mined = super::enrich::mined_transcript_keys();
// Limit new tasks per tick — the resource pool gates execution, // Limit new tasks per tick — the resource pool gates execution,
// but we don't need thousands of task objects in the registry. // but we don't need thousands of task objects in the registry.
@@ -372,7 +372,7 @@ pub fn run_daemon() -> Result<(), String> {
let path_str = session.to_string_lossy().to_string(); let path_str = session.to_string_lossy().to_string();
// Check for old-style whole-file mined key // Check for old-style whole-file mined key
let experience_done = crate::enrich::is_transcript_mined_with_keys(&mined, &path_str); let experience_done = super::enrich::is_transcript_mined_with_keys(&mined, &path_str);
if !experience_done { if !experience_done {
if is_file_open(&session) { if is_file_open(&session) {
@@ -384,11 +384,11 @@ pub fn run_daemon() -> Result<(), String> {
let seg_count = if let Some(&cached) = seg_cache.get(&path_str) { let seg_count = if let Some(&cached) = seg_cache.get(&path_str) {
cached cached
} else { } else {
let messages = match crate::enrich::extract_conversation(&path_str) { let messages = match super::enrich::extract_conversation(&path_str) {
Ok(m) => m, Ok(m) => m,
Err(_) => continue, Err(_) => continue,
}; };
let count = crate::enrich::split_on_compaction(messages).len(); let count = super::enrich::split_on_compaction(messages).len();
seg_cache.insert(path_str.clone(), count); seg_cache.insert(path_str.clone(), count);
count count
}; };
@@ -400,7 +400,7 @@ pub fn run_daemon() -> Result<(), String> {
} }
} else { } else {
// Multi-segment — find unmined segments // Multi-segment — find unmined segments
let fname_key = crate::enrich::transcript_filename_key(&path_str); let fname_key = super::enrich::transcript_filename_key(&path_str);
let mut unmined = 0; let mut unmined = 0;
for i in 0..seg_count { for i in 0..seg_count {
let seg_key = format!("{}.{}", fname_key, i); let seg_key = format!("{}.{}", fname_key, i);

View file

@@ -5,7 +5,7 @@
// summarize weeklies. All three share the same generate/auto-detect // summarize weeklies. All three share the same generate/auto-detect
// pipeline, parameterized by DigestLevel. // pipeline, parameterized by DigestLevel.
use crate::llm::{call_sonnet, semantic_keys}; use super::llm::{call_sonnet, semantic_keys};
use crate::store::{self, Store, new_relation}; use crate::store::{self, Store, new_relation};
use crate::neuro; use crate::neuro;
@@ -209,7 +209,7 @@ fn generate_digest(
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(", "); .join(", ");
let prompt = neuro::load_prompt("digest", &[ let prompt = super::prompts::load_prompt("digest", &[
("{{LEVEL}}", level.title), ("{{LEVEL}}", level.title),
("{{PERIOD}}", level.period), ("{{PERIOD}}", level.period),
("{{INPUT_TITLE}}", level.input_title), ("{{INPUT_TITLE}}", level.input_title),

View file

@@ -7,7 +7,7 @@
// Both extract conversation from JSONL transcripts, build prompts, call Sonnet, // Both extract conversation from JSONL transcripts, build prompts, call Sonnet,
// and apply results to the store. // and apply results to the store.
use crate::llm::{call_sonnet, parse_json_response, semantic_keys}; use super::llm::{call_sonnet, parse_json_response, semantic_keys};
use crate::neuro; use crate::neuro;
use crate::store::{self, Store, new_node, new_relation}; use crate::store::{self, Store, new_node, new_relation};
@@ -174,7 +174,7 @@ fn build_journal_prompt(
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join("\n"); .join("\n");
neuro::load_prompt("journal-enrich", &[ super::prompts::load_prompt("journal-enrich", &[
("{{GREP_LINE}}", &grep_line.to_string()), ("{{GREP_LINE}}", &grep_line.to_string()),
("{{ENTRY_TEXT}}", entry_text), ("{{ENTRY_TEXT}}", entry_text),
("{{KEYS}}", &keys_text), ("{{KEYS}}", &keys_text),
@@ -334,7 +334,7 @@ pub fn experience_mine(
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join("\n"); .join("\n");
let prompt = neuro::load_prompt("experience", &[ let prompt = super::prompts::load_prompt("experience", &[
("{{IDENTITY}}", &identity), ("{{IDENTITY}}", &identity),
("{{RECENT_JOURNAL}}", &recent), ("{{RECENT_JOURNAL}}", &recent),
("{{KEYS}}", &keys_text), ("{{KEYS}}", &keys_text),

View file

@@ -6,7 +6,7 @@
// Uses Haiku (not Sonnet) for cost efficiency on high-volume extraction. // Uses Haiku (not Sonnet) for cost efficiency on high-volume extraction.
use crate::config; use crate::config;
use crate::llm; use super::llm;
use crate::store::{self, Provenance}; use crate::store::{self, Provenance};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};

View file

@@ -11,7 +11,7 @@
// convergence via graph-structural metrics (sigma, CC, communities). // convergence via graph-structural metrics (sigma, CC, communities).
use crate::graph::Graph; use crate::graph::Graph;
use crate::llm; use super::llm;
use crate::spectral; use crate::spectral;
use crate::store::{self, Store, new_relation, RelationType}; use crate::store::{self, Store, new_relation, RelationType};
@@ -329,7 +329,7 @@ fn agent_provenance(agent: &str) -> store::Provenance {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
fn load_prompt(name: &str) -> Result<String, String> { fn load_prompt(name: &str) -> Result<String, String> {
crate::neuro::load_prompt(name, &[]) super::prompts::load_prompt(name, &[])
} }
fn get_graph_topology(store: &Store, graph: &Graph) -> String { fn get_graph_topology(store: &Store, graph: &Graph) -> String {

View file

@@ -0,0 +1,25 @@
//! Agent layer: every LLM-powered operation on the memory graph.
//!
//! Modules here either invoke external models (Sonnet, Haiku) directly or
//! orchestrate sequences of such calls. Core graph infrastructure — store,
//! graph, spectral, search, similarity — stays at the crate root.
//!
//! | module      | responsibility                                      |
//! |-------------|-----------------------------------------------------|
//! | llm         | model invocation, response parsing                  |
//! | prompts     | prompt generation from store data                   |
//! | audit       | link quality review via Sonnet                      |
//! | consolidate | full consolidation pipeline                         |
//! | knowledge   | knowledge production agents + convergence loop      |
//! | enrich      | journal enrichment, experience mining               |
//! | fact_mine   | fact extraction from transcripts                    |
//! | digest      | episodic digest generation (daily/weekly/monthly)   |
//! | daemon      | background job scheduler                            |
pub mod llm;
pub mod prompts;
pub mod audit;
pub mod consolidate;
pub mod knowledge;
pub mod enrich;
pub mod fact_mine;
pub mod digest;
pub mod daemon;

View file

@@ -6,7 +6,7 @@ use crate::graph::Graph;
use crate::similarity; use crate::similarity;
use crate::spectral; use crate::spectral;
use super::scoring::{ use crate::neuro::{
ReplayItem, consolidation_priority, ReplayItem, consolidation_priority,
replay_queue, replay_queue_with_graph, detect_interference, replay_queue, replay_queue_with_graph, detect_interference,
}; };

View file

@@ -3,25 +3,27 @@
// Re-exports modules so that memory-search and other binaries // Re-exports modules so that memory-search and other binaries
// can call library functions directly instead of shelling out. // can call library functions directly instead of shelling out.
// Core infrastructure
pub mod config; pub mod config;
pub mod store; pub mod store;
pub mod util; pub mod util;
pub mod llm;
pub mod digest;
pub mod audit;
pub mod enrich;
pub mod consolidate;
pub mod graph; pub mod graph;
pub mod search; pub mod search;
pub mod similarity; pub mod similarity;
pub mod migrate;
pub mod neuro;
pub mod query;
pub mod spectral; pub mod spectral;
pub mod lookups; pub mod lookups;
pub mod daemon; pub mod query;
pub mod fact_mine; pub mod migrate;
pub mod knowledge; pub mod neuro;
// Agent layer (LLM-powered operations)
pub mod agents;
// Re-export agent submodules at crate root for backwards compatibility
pub use agents::{
llm, audit, consolidate, knowledge,
enrich, fact_mine, digest, daemon,
};
pub mod memory_capnp { pub mod memory_capnp {
include!(concat!(env!("OUT_DIR"), "/schema/memory_capnp.rs")); include!(concat!(env!("OUT_DIR"), "/schema/memory_capnp.rs"));

View file

@@ -878,11 +878,11 @@ fn cmd_consolidate_batch(count: usize, auto: bool, agent: Option<String>) -> Res
let store = store::Store::load()?; let store = store::Store::load()?;
if let Some(agent_name) = agent { if let Some(agent_name) = agent {
let prompt = neuro::agent_prompt(&store, &agent_name, count)?; let prompt = agents::prompts::agent_prompt(&store, &agent_name, count)?;
println!("{}", prompt); println!("{}", prompt);
Ok(()) Ok(())
} else { } else {
neuro::consolidation_batch(&store, count, auto) agents::prompts::consolidation_batch(&store, count, auto)
} }
} }

View file

@@ -5,22 +5,17 @@
// rewrite — graph topology mutations: differentiation, closure, linking // rewrite — graph topology mutations: differentiation, closure, linking
mod scoring; mod scoring;
mod prompts;
mod rewrite; mod rewrite;
// Re-export public API so `neuro::` paths continue to work.
pub use scoring::{ pub use scoring::{
replay_queue, detect_interference, ReplayItem,
consolidation_priority,
replay_queue, replay_queue_with_graph,
detect_interference,
consolidation_plan, format_plan, consolidation_plan, format_plan,
daily_check, daily_check,
}; };
pub use prompts::{
load_prompt,
consolidation_batch, agent_prompt,
};
pub use rewrite::{ pub use rewrite::{
refine_target, LinkMove, refine_target, LinkMove,
differentiate_hub, differentiate_hub,