move LLM-dependent modules into agents/ subdir

Separate the agent layer (everything that calls external LLMs or
orchestrates sequences of such calls) from core graph infrastructure.

agents/: llm, prompts, audit, consolidate, knowledge, enrich,
         fact_mine, digest, daemon

Root: store/, graph, spectral, search, similarity, lookups, query,
      config, util, migrate, neuro/ (scoring + rewrite)

Re-exports at crate root preserve backwards compatibility so
`crate::llm`, `crate::digest` etc. continue to work.
This commit is contained in:
ProofOfConcept 2026-03-08 21:27:41 -04:00
parent 3dddc40841
commit cee9b76a7b
13 changed files with 68 additions and 46 deletions

View file

@ -3,7 +3,7 @@
// Each batch of links gets reviewed by Sonnet, which returns per-link actions:
// KEEP, DELETE, RETARGET, WEAKEN, STRENGTHEN. Batches run in parallel via rayon.
use crate::llm::call_sonnet;
use super::llm::call_sonnet;
use crate::store::{self, Store, new_relation};
use std::collections::HashSet;

View file

@ -10,8 +10,8 @@
//
// apply_consolidation() processes consolidation reports independently.
use crate::digest;
use crate::llm::{call_sonnet, parse_json_response};
use super::digest;
use super::llm::{call_sonnet, parse_json_response};
use crate::neuro;
use crate::store::{self, Store, new_relation};
@ -98,7 +98,7 @@ pub fn consolidate_full_with_progress(
*store = Store::load()?;
}
let prompt = match neuro::agent_prompt(store, agent_type, *count) {
let prompt = match super::prompts::agent_prompt(store, agent_type, *count) {
Ok(p) => p,
Err(e) => {
let msg = format!(" ERROR building prompt: {}", e);
@ -266,7 +266,7 @@ fn build_consolidation_prompt(store: &Store, report_keys: &[String]) -> Result<S
"=".repeat(60), key, content));
}
neuro::load_prompt("consolidation", &[("{{REPORTS}}", &report_text)])
super::prompts::load_prompt("consolidation", &[("{{REPORTS}}", &report_text)])
}
/// Run the full apply-consolidation pipeline.

View file

@ -103,7 +103,7 @@ fn job_experience_mine(ctx: &ExecutionContext, path: &str, segment: Option<usize
ctx.log_line("loading store");
let mut store = crate::store::Store::load()?;
ctx.log_line("mining");
let count = crate::enrich::experience_mine(&mut store, &path, segment)?;
let count = super::enrich::experience_mine(&mut store, &path, segment)?;
ctx.log_line(format!("{count} entries mined"));
Ok(())
})
@ -115,7 +115,7 @@ fn job_fact_mine(ctx: &ExecutionContext, path: &str) -> Result<(), TaskError> {
ctx.log_line("mining facts");
let p = std::path::Path::new(&path);
let progress = |msg: &str| { ctx.set_progress(msg); };
let count = crate::fact_mine::mine_and_store(p, Some(&progress))?;
let count = super::fact_mine::mine_and_store(p, Some(&progress))?;
ctx.log_line(format!("{count} facts stored"));
Ok(())
})
@ -125,7 +125,7 @@ fn job_consolidate(ctx: &ExecutionContext) -> Result<(), TaskError> {
run_job(ctx, "consolidate", || {
ctx.log_line("loading store");
let mut store = crate::store::Store::load()?;
crate::consolidate::consolidate_full_with_progress(&mut store, &|msg| {
super::consolidate::consolidate_full_with_progress(&mut store, &|msg| {
ctx.log_line(msg);
})
})
@ -133,13 +133,13 @@ fn job_consolidate(ctx: &ExecutionContext) -> Result<(), TaskError> {
fn job_knowledge_loop(ctx: &ExecutionContext) -> Result<(), TaskError> {
run_job(ctx, "knowledge-loop", || {
let config = crate::knowledge::KnowledgeLoopConfig {
let config = super::knowledge::KnowledgeLoopConfig {
max_cycles: 100,
batch_size: 5,
..Default::default()
};
ctx.log_line("running agents");
let results = crate::knowledge::run_knowledge_loop(&config)?;
let results = super::knowledge::run_knowledge_loop(&config)?;
ctx.log_line(format!("{} cycles, {} actions",
results.len(),
results.iter().map(|r| r.total_applied).sum::<usize>()));
@ -329,7 +329,7 @@ pub fn run_daemon() -> Result<(), String> {
let stale = find_stale_sessions();
// Load mined transcript keys once for this tick
let mined = crate::enrich::mined_transcript_keys();
let mined = super::enrich::mined_transcript_keys();
// Limit new tasks per tick — the resource pool gates execution,
// but we don't need thousands of task objects in the registry.
@ -372,7 +372,7 @@ pub fn run_daemon() -> Result<(), String> {
let path_str = session.to_string_lossy().to_string();
// Check for old-style whole-file mined key
let experience_done = crate::enrich::is_transcript_mined_with_keys(&mined, &path_str);
let experience_done = super::enrich::is_transcript_mined_with_keys(&mined, &path_str);
if !experience_done {
if is_file_open(&session) {
@ -384,11 +384,11 @@ pub fn run_daemon() -> Result<(), String> {
let seg_count = if let Some(&cached) = seg_cache.get(&path_str) {
cached
} else {
let messages = match crate::enrich::extract_conversation(&path_str) {
let messages = match super::enrich::extract_conversation(&path_str) {
Ok(m) => m,
Err(_) => continue,
};
let count = crate::enrich::split_on_compaction(messages).len();
let count = super::enrich::split_on_compaction(messages).len();
seg_cache.insert(path_str.clone(), count);
count
};
@ -400,7 +400,7 @@ pub fn run_daemon() -> Result<(), String> {
}
} else {
// Multi-segment — find unmined segments
let fname_key = crate::enrich::transcript_filename_key(&path_str);
let fname_key = super::enrich::transcript_filename_key(&path_str);
let mut unmined = 0;
for i in 0..seg_count {
let seg_key = format!("{}.{}", fname_key, i);

View file

@ -5,7 +5,7 @@
// summarize weeklies. All three share the same generate/auto-detect
// pipeline, parameterized by DigestLevel.
use crate::llm::{call_sonnet, semantic_keys};
use super::llm::{call_sonnet, semantic_keys};
use crate::store::{self, Store, new_relation};
use crate::neuro;
@ -209,7 +209,7 @@ fn generate_digest(
.collect::<Vec<_>>()
.join(", ");
let prompt = neuro::load_prompt("digest", &[
let prompt = super::prompts::load_prompt("digest", &[
("{{LEVEL}}", level.title),
("{{PERIOD}}", level.period),
("{{INPUT_TITLE}}", level.input_title),

View file

@ -7,7 +7,7 @@
// Both extract conversation from JSONL transcripts, build prompts, call Sonnet,
// and apply results to the store.
use crate::llm::{call_sonnet, parse_json_response, semantic_keys};
use super::llm::{call_sonnet, parse_json_response, semantic_keys};
use crate::neuro;
use crate::store::{self, Store, new_node, new_relation};
@ -174,7 +174,7 @@ fn build_journal_prompt(
.collect::<Vec<_>>()
.join("\n");
neuro::load_prompt("journal-enrich", &[
super::prompts::load_prompt("journal-enrich", &[
("{{GREP_LINE}}", &grep_line.to_string()),
("{{ENTRY_TEXT}}", entry_text),
("{{KEYS}}", &keys_text),
@ -334,7 +334,7 @@ pub fn experience_mine(
.collect::<Vec<_>>()
.join("\n");
let prompt = neuro::load_prompt("experience", &[
let prompt = super::prompts::load_prompt("experience", &[
("{{IDENTITY}}", &identity),
("{{RECENT_JOURNAL}}", &recent),
("{{KEYS}}", &keys_text),

View file

@ -6,7 +6,7 @@
// Uses Haiku (not Sonnet) for cost efficiency on high-volume extraction.
use crate::config;
use crate::llm;
use super::llm;
use crate::store::{self, Provenance};
use serde::{Deserialize, Serialize};

View file

@ -11,7 +11,7 @@
// convergence via graph-structural metrics (sigma, CC, communities).
use crate::graph::Graph;
use crate::llm;
use super::llm;
use crate::spectral;
use crate::store::{self, Store, new_relation, RelationType};
@ -329,7 +329,7 @@ fn agent_provenance(agent: &str) -> store::Provenance {
// ---------------------------------------------------------------------------
fn load_prompt(name: &str) -> Result<String, String> {
crate::neuro::load_prompt(name, &[])
super::prompts::load_prompt(name, &[])
}
fn get_graph_topology(store: &Store, graph: &Graph) -> String {

View file

@ -0,0 +1,25 @@
//! Agent layer: LLM-powered operations on the memory graph
//!
//! Everything here calls external models (Sonnet, Haiku) or orchestrates
//! sequences of such calls. The core graph infrastructure (store, graph,
//! spectral, search, similarity) lives at the crate root.
//!
//! llm — model invocation, response parsing
//! prompts — prompt generation from store data
//! audit — link quality review via Sonnet
//! consolidate — full consolidation pipeline
//! knowledge — knowledge production agents + convergence loop
//! enrich — journal enrichment, experience mining
//! fact_mine — fact extraction from transcripts
//! digest — episodic digest generation (daily/weekly/monthly)
//! daemon — background job scheduler
pub mod llm;
pub mod prompts;
pub mod audit;
pub mod consolidate;
pub mod knowledge;
pub mod enrich;
pub mod fact_mine;
pub mod digest;
pub mod daemon;

View file

@ -6,7 +6,7 @@ use crate::graph::Graph;
use crate::similarity;
use crate::spectral;
use super::scoring::{
use crate::neuro::{
ReplayItem, consolidation_priority,
replay_queue, replay_queue_with_graph, detect_interference,
};

View file

@ -3,25 +3,27 @@
// Re-exports modules so that memory-search and other binaries
// can call library functions directly instead of shelling out.
// Core infrastructure
pub mod config;
pub mod store;
pub mod util;
pub mod llm;
pub mod digest;
pub mod audit;
pub mod enrich;
pub mod consolidate;
pub mod graph;
pub mod search;
pub mod similarity;
pub mod migrate;
pub mod neuro;
pub mod query;
pub mod spectral;
pub mod lookups;
pub mod daemon;
pub mod fact_mine;
pub mod knowledge;
pub mod query;
pub mod migrate;
pub mod neuro;
// Agent layer (LLM-powered operations)
pub mod agents;
// Re-export agent submodules at crate root for backwards compatibility
pub use agents::{
llm, audit, consolidate, knowledge,
enrich, fact_mine, digest, daemon,
};
pub mod memory_capnp {
include!(concat!(env!("OUT_DIR"), "/schema/memory_capnp.rs"));

View file

@ -878,11 +878,11 @@ fn cmd_consolidate_batch(count: usize, auto: bool, agent: Option<String>) -> Res
let store = store::Store::load()?;
if let Some(agent_name) = agent {
let prompt = neuro::agent_prompt(&store, &agent_name, count)?;
let prompt = agents::prompts::agent_prompt(&store, &agent_name, count)?;
println!("{}", prompt);
Ok(())
} else {
neuro::consolidation_batch(&store, count, auto)
agents::prompts::consolidation_batch(&store, count, auto)
}
}

View file

@ -5,22 +5,17 @@
// rewrite — graph topology mutations: differentiation, closure, linking
mod scoring;
mod prompts;
mod rewrite;
// Re-export public API so `neuro::` paths continue to work.
pub use scoring::{
replay_queue, detect_interference,
ReplayItem,
consolidation_priority,
replay_queue, replay_queue_with_graph,
detect_interference,
consolidation_plan, format_plan,
daily_check,
};
pub use prompts::{
load_prompt,
consolidation_batch, agent_prompt,
};
pub use rewrite::{
refine_target, LinkMove,
differentiate_hub,