restructure: hippocampus/ for memory, subconscious/ for agents

hippocampus/ — memory storage, retrieval, and consolidation:
  store, graph, query, similarity, spectral, neuro, counters,
  config, transcript, memory_search, lookups, cursor, migrate

subconscious/ — autonomous agents that process without being asked:
  reflect, surface, consolidate, digest, audit, etc.

All existing crate::X paths preserved via re-exports in lib.rs.
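The lib.rs shim is just a facade over the new modules; a minimal sketch of
the pattern (illustrative: the exact re-export list isn't shown in this diff,
and the module/item names below follow the summary above):

    pub mod hippocampus;
    pub mod subconscious;

    // Old call sites keep compiling: crate::store, crate::graph, ...
    pub use hippocampus::{store, graph, query, similarity, spectral, neuro};
    pub use subconscious::{reflect, surface, consolidate, digest, audit};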

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
ProofOfConcept 2026-03-25 01:04:13 -04:00, committed by Kent Overstreet
parent cfed85bd20
commit d5c0e86700
39 changed files with 87 additions and 32 deletions

@@ -0,0 +1,25 @@
// Neuroscience-inspired memory algorithms, split by concern:
//
// scoring — pure analysis: priority, replay queues, interference, plans
// prompts — agent prompt generation and formatting
// rewrite — graph topology mutations: differentiation, closure, linking
mod scoring;
mod rewrite;
pub use scoring::{
ReplayItem,
ConsolidationPlan,
consolidation_priority,
replay_queue, replay_queue_with_graph,
detect_interference,
consolidation_plan, consolidation_plan_quick, format_plan,
daily_check,
};
pub use rewrite::{
refine_target, LinkMove,
differentiate_hub,
apply_differentiation, find_differentiable_hubs,
triangle_close, link_orphans,
};

@@ -0,0 +1,348 @@
// Graph topology mutations: hub differentiation, triangle closure,
// orphan linking, and link refinement. These modify the store.
use crate::store::{Store, new_relation};
use crate::graph::Graph;
use crate::similarity;
/// Collect (key, content) pairs for all section children of a file-level node.
fn section_children<'a>(store: &'a Store, file_key: &str) -> Vec<(&'a str, &'a str)> {
let prefix = format!("{}#", file_key);
store.nodes.iter()
.filter(|(k, _)| k.starts_with(&prefix))
.map(|(k, n)| (k.as_str(), n.content.as_str()))
.collect()
}
/// Find the best matching candidate by cosine similarity against content.
/// Returns (key, similarity) if any candidate exceeds threshold.
fn best_match(candidates: &[(&str, &str)], content: &str, threshold: f32) -> Option<(String, f32)> {
let (best_key, best_sim) = candidates.iter()
.map(|(key, text)| (*key, similarity::cosine_similarity(content, text)))
.max_by(|a, b| a.1.total_cmp(&b.1))?;
if best_sim > threshold {
Some((best_key.to_string(), best_sim))
} else {
None
}
}
/// Refine a link target: if the target is a file-level node with section
/// children, find the best-matching section by cosine similarity against
/// the source content. Returns the original key if no sections exist or
/// no section matches above threshold.
///
/// This prevents hub formation at link creation time — every new link
/// targets the most specific available node.
pub fn refine_target(store: &Store, source_content: &str, target_key: &str) -> String {
// Only refine file-level nodes (no # in key)
if target_key.contains('#') { return target_key.to_string(); }
let sections = section_children(store, target_key);
if sections.is_empty() { return target_key.to_string(); }
best_match(&sections, source_content, 0.05)
.map(|(key, _)| key)
.unwrap_or_else(|| target_key.to_string())
}
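// Illustrative usage (hypothetical keys and content; refine_target only
// reads the store):
//
//     let key = refine_target(&store, "how replay scheduling works", "notes.md");
//     // "notes.md#replay" if that section clears the 0.05 threshold,
//     // "notes.md" otherwise.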
/// A proposed link move: from hub→neighbor to section→neighbor
pub struct LinkMove {
pub neighbor_key: String,
pub from_hub: String,
pub to_section: String,
pub similarity: f32,
pub neighbor_snippet: String,
}
/// Analyze a hub node and propose redistributing its links to child sections.
///
/// Returns None if the node isn't a hub or has no sections to redistribute to.
pub fn differentiate_hub(store: &Store, hub_key: &str) -> Option<Vec<LinkMove>> {
let graph = store.build_graph();
differentiate_hub_with_graph(store, hub_key, &graph)
}
/// Like differentiate_hub but uses a pre-built graph.
pub fn differentiate_hub_with_graph(store: &Store, hub_key: &str, graph: &Graph) -> Option<Vec<LinkMove>> {
let degree = graph.degree(hub_key);
// Only differentiate actual hubs. Note: this fixed cutoff of 20 is
// independent of the graph.hub_threshold() used by find_differentiable_hubs.
if degree < 20 { return None; }
// Only works on file-level nodes that have section children
if hub_key.contains('#') { return None; }
let sections = section_children(store, hub_key);
if sections.is_empty() { return None; }
// Get all neighbors of the hub
let neighbors = graph.neighbors(hub_key);
let prefix = format!("{}#", hub_key);
let mut moves = Vec::new();
for (neighbor_key, _strength) in &neighbors {
// Skip section children — they should stay linked to parent
if neighbor_key.starts_with(&prefix) { continue; }
let neighbor_content = match store.nodes.get(neighbor_key.as_str()) {
Some(n) => &n.content,
None => continue,
};
// Find best-matching section by content similarity
if let Some((best_section, best_sim)) = best_match(&sections, neighbor_content, 0.05) {
let snippet = crate::util::first_n_chars(
neighbor_content.lines()
.find(|l| !l.is_empty() && !l.starts_with("<!--") && !l.starts_with("##"))
.unwrap_or(""),
80);
moves.push(LinkMove {
neighbor_key: neighbor_key.to_string(),
from_hub: hub_key.to_string(),
to_section: best_section,
similarity: best_sim,
neighbor_snippet: snippet,
});
}
}
moves.sort_by(|a, b| b.similarity.total_cmp(&a.similarity));
Some(moves)
}
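// Illustrative sketch (hypothetical hub key; assumes a `store` in scope):
// inspect the proposed moves before applying them.
//
//     if let Some(moves) = differentiate_hub(&store, "projects.md") {
//         for mv in &moves {
//             println!("{} -> {} (sim {:.2}): {}",
//                 mv.neighbor_key, mv.to_section, mv.similarity, mv.neighbor_snippet);
//         }
//     }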
/// Apply link moves: soft-delete hub→neighbor, create section→neighbor.
pub fn apply_differentiation(
store: &mut Store,
moves: &[LinkMove],
) -> (usize, usize) {
let mut applied = 0usize;
let mut skipped = 0usize;
for mv in moves {
// Check that section→neighbor doesn't already exist
let exists = store.relations.iter().any(|r|
((r.source_key == mv.to_section && r.target_key == mv.neighbor_key)
|| (r.source_key == mv.neighbor_key && r.target_key == mv.to_section))
&& !r.deleted
);
if exists { skipped += 1; continue; }
let section_uuid = match store.nodes.get(&mv.to_section) {
Some(n) => n.uuid,
None => { skipped += 1; continue; }
};
let neighbor_uuid = match store.nodes.get(&mv.neighbor_key) {
Some(n) => n.uuid,
None => { skipped += 1; continue; }
};
// Soft-delete old hub→neighbor relation
for rel in &mut store.relations {
if ((rel.source_key == mv.from_hub && rel.target_key == mv.neighbor_key)
|| (rel.source_key == mv.neighbor_key && rel.target_key == mv.from_hub))
&& !rel.deleted
{
rel.deleted = true;
}
}
// Create new section→neighbor relation
let new_rel = new_relation(
section_uuid, neighbor_uuid,
crate::store::RelationType::Auto,
0.5,
&mv.to_section, &mv.neighbor_key,
);
if store.add_relation(new_rel).is_ok() {
applied += 1;
}
}
(applied, skipped)
}
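// Illustrative driver, not part of this commit: differentiate every eligible
// hub in one pass, using only the functions defined in this module. Note that
// apply_differentiation does not persist; call store.save() afterwards if the
// changes should reach disk.
pub fn differentiate_all_hubs(store: &mut Store) -> (usize, usize) {
    let (mut applied, mut skipped) = (0usize, 0usize);
    for (hub_key, _degree, _section_count) in find_differentiable_hubs(store) {
        if let Some(moves) = differentiate_hub(store, &hub_key) {
            let (a, s) = apply_differentiation(store, &moves);
            applied += a;
            skipped += s;
        }
    }
    (applied, skipped)
}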
/// Find all file-level hubs that have section children to split into.
pub fn find_differentiable_hubs(store: &Store) -> Vec<(String, usize, usize)> {
let graph = store.build_graph();
let threshold = graph.hub_threshold();
let mut hubs = Vec::new();
for key in graph.nodes() {
let deg = graph.degree(key);
if deg < threshold { continue; }
if key.contains('#') { continue; }
let section_count = section_children(store, key).len();
if section_count > 0 {
hubs.push((key.clone(), deg, section_count));
}
}
hubs.sort_by(|a, b| b.1.cmp(&a.1));
hubs
}
/// Triangle closure: for each node with degree >= min_degree, find pairs
/// of its neighbors that aren't directly connected and have cosine
/// similarity above sim_threshold. Add links between them.
///
/// This turns hub-spoke patterns into triangles, directly improving
/// clustering coefficient and schema fit.
pub fn triangle_close(
store: &mut Store,
min_degree: usize,
sim_threshold: f32,
max_links_per_hub: usize,
) -> (usize, usize) {
let graph = store.build_graph();
let mut added = 0usize;
let mut hubs_processed = 0usize;
// Get nodes sorted by degree (highest first)
let mut candidates: Vec<(String, usize)> = graph.nodes().iter()
.map(|k| (k.clone(), graph.degree(k)))
.filter(|(_, d)| *d >= min_degree)
.collect();
candidates.sort_by(|a, b| b.1.cmp(&a.1));
for (hub_key, hub_deg) in &candidates {
let neighbors = graph.neighbor_keys(hub_key);
if neighbors.len() < 2 { continue; }
// Collect neighbor content for similarity
let neighbor_docs: Vec<(String, String)> = neighbors.iter()
.filter_map(|&k| {
store.nodes.get(k).map(|n| (k.to_string(), n.content.clone()))
})
.collect();
// Find unconnected pairs with high similarity
let mut pair_scores: Vec<(String, String, f32)> = Vec::new();
for i in 0..neighbor_docs.len() {
// Neighbor set of i, shared across the inner loop
let n_i = graph.neighbor_keys(&neighbor_docs[i].0);
for j in (i + 1)..neighbor_docs.len() {
// Skip pairs that are already connected
if n_i.contains(neighbor_docs[j].0.as_str()) { continue; }
let sim = similarity::cosine_similarity(
&neighbor_docs[i].1, &neighbor_docs[j].1);
if sim >= sim_threshold {
pair_scores.push((
neighbor_docs[i].0.clone(),
neighbor_docs[j].0.clone(),
sim,
));
}
}
}
pair_scores.sort_by(|a, b| b.2.total_cmp(&a.2));
let to_add = pair_scores.len().min(max_links_per_hub);
if to_add > 0 {
println!(" {} (deg={}) — {} triangles to close (top {})",
hub_key, hub_deg, pair_scores.len(), to_add);
for (a, b, sim) in pair_scores.iter().take(to_add) {
let uuid_a = match store.nodes.get(a) { Some(n) => n.uuid, None => continue };
let uuid_b = match store.nodes.get(b) { Some(n) => n.uuid, None => continue };
let rel = new_relation(
uuid_a, uuid_b,
crate::store::RelationType::Auto,
sim * 0.5, // scale by similarity
a, b,
);
if let Ok(()) = store.add_relation(rel) {
added += 1;
}
}
hubs_processed += 1;
}
}
if added > 0 {
let _ = store.save();
}
(hubs_processed, added)
}
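// Illustrative call (thresholds are made up): close triangles around nodes
// with degree >= 10, linking neighbor pairs at cosine similarity >= 0.3,
// at most 5 new links per hub. Saves automatically when links were added.
//
//     let (hubs, links) = triangle_close(&mut store, 10, 0.3, 5);
//     println!("closed {} triangles across {} hubs", links, hubs);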
/// Link orphan nodes (degree < min_degree) to their most textually similar
/// connected nodes. For each orphan, finds top-K nearest neighbors by
/// cosine similarity and creates Auto links.
/// Returns (orphans_linked, total_links_added).
pub fn link_orphans(
store: &mut Store,
min_degree: usize,
links_per_orphan: usize,
sim_threshold: f32,
) -> (usize, usize) {
let graph = store.build_graph();
let mut added = 0usize;
let mut orphans_linked = 0usize;
// Separate orphans from connected nodes
let orphans: Vec<String> = graph.nodes().iter()
.filter(|k| graph.degree(k) < min_degree)
.cloned()
.collect();
// Build candidate pool: connected nodes with their content
let candidates: Vec<(String, String)> = graph.nodes().iter()
.filter(|k| graph.degree(k) >= min_degree)
.filter_map(|k| store.nodes.get(k).map(|n| (k.clone(), n.content.clone())))
.collect();
if candidates.is_empty() { return (0, 0); }
for orphan_key in &orphans {
let orphan_content = match store.nodes.get(orphan_key) {
Some(n) => n.content.clone(),
None => continue,
};
if orphan_content.len() < 20 { continue; } // skip near-empty nodes
// Score against all candidates
let mut scores: Vec<(usize, f32)> = candidates.iter()
.enumerate()
.map(|(i, (_, content))| {
(i, similarity::cosine_similarity(&orphan_content, content))
})
.filter(|(_, s)| *s >= sim_threshold)
.collect();
scores.sort_by(|a, b| b.1.total_cmp(&a.1));
let to_link = scores.len().min(links_per_orphan);
if to_link == 0 { continue; }
let orphan_uuid = store.nodes.get(orphan_key).unwrap().uuid;
for &(idx, sim) in scores.iter().take(to_link) {
let target_key = &candidates[idx].0;
let target_uuid = match store.nodes.get(target_key) {
Some(n) => n.uuid,
None => continue,
};
let rel = new_relation(
orphan_uuid, target_uuid,
crate::store::RelationType::Auto,
sim * 0.5,
orphan_key, target_key,
);
if store.add_relation(rel).is_ok() {
added += 1;
}
}
orphans_linked += 1;
}
if added > 0 {
let _ = store.save();
}
(orphans_linked, added)
}
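// Illustrative call (thresholds are made up): treat degree < 2 as orphaned,
// give each orphan up to 3 links to well-connected nodes at similarity >= 0.2.
//
//     let (orphans, links) = link_orphans(&mut store, 2, 3, 0.2);
//     println!("linked {} orphans with {} new edges", orphans, links);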

@@ -0,0 +1,446 @@
// Consolidation scoring, replay queues, interference detection, and
// graph health metrics. Pure analysis — no store mutations.
use crate::store::{Store, now_epoch};
use crate::graph::{self, Graph};
use crate::spectral::{self, SpectralEmbedding, SpectralPosition};
use std::collections::HashMap;
const SECS_PER_DAY: f64 = 86400.0;
/// Consolidation priority: how urgently a node needs attention.
///
/// With spectral data:
/// priority = spectral_displacement × overdue × emotion
/// Without:
/// priority = (1 - cc) × overdue × emotion
///
/// Spectral displacement is the outlier_score clamped and normalized —
/// it measures how far a node sits from its community center in the
/// eigenspace. This is a global signal (considers all graph structure)
/// vs CC which is local (only immediate neighbors).
pub fn consolidation_priority(
store: &Store,
key: &str,
graph: &Graph,
spectral_outlier: Option<f64>,
) -> f64 {
let node = match store.nodes.get(key) {
Some(n) => n,
None => return 0.0,
};
// Integration factor: how poorly integrated is this node?
let displacement = if let Some(outlier) = spectral_outlier {
// outlier_score = dist_to_center / median_dist_in_community
// 1.0 = typical position, >2 = unusual, >5 = extreme outlier
// Compress the dynamic range: divide by 3 and clamp at 3, so the
// difference between outlier=5 and outlier=10 matters less than 1 vs 2.
(outlier / 3.0).min(3.0)
} else {
let cc = graph.clustering_coefficient(key) as f64;
1.0 - cc
};
// Spaced repetition: how overdue is this node for replay?
let interval_secs = node.spaced_repetition_interval as f64 * SECS_PER_DAY;
let time_since_replay = if node.last_replayed > 0 {
(now_epoch() - node.last_replayed).max(0) as f64
} else {
interval_secs * 3.0
};
let overdue_ratio = (time_since_replay / interval_secs).min(5.0);
// Emotional intensity: higher emotion = higher priority
let emotion_factor = 1.0 + (node.emotion as f64 / 10.0);
displacement * overdue_ratio * emotion_factor
}
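// Worked example (made-up values): a node with spectral outlier_score 6.0,
// last replayed 10 days ago on a 4-day interval, with emotion 5.0:
//   displacement  = min(6.0 / 3.0, 3.0) = 2.0
//   overdue_ratio = min(10 / 4, 5.0)    = 2.5
//   emotion       = 1.0 + 5.0 / 10.0    = 1.5
//   priority      = 2.0 * 2.5 * 1.5     = 7.5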
/// Item in the replay queue
pub struct ReplayItem {
pub key: String,
pub priority: f64,
pub interval_days: u32,
pub emotion: f32,
pub cc: f32,
/// Spectral classification: "bridge", "outlier", "core", "peripheral",
/// or "unknown" when no spectral embedding is available
pub classification: &'static str,
/// Raw spectral outlier score (distance / median)
pub outlier_score: f64,
}
/// Generate the replay queue: nodes ordered by consolidation priority.
/// Automatically loads spectral embedding if available.
pub fn replay_queue(store: &Store, count: usize) -> Vec<ReplayItem> {
let graph = store.build_graph();
let emb = spectral::load_embedding().ok();
replay_queue_with_graph(store, count, &graph, emb.as_ref())
}
/// Generate the replay queue using pre-built graph and optional spectral data.
pub fn replay_queue_with_graph(
store: &Store,
count: usize,
graph: &Graph,
emb: Option<&SpectralEmbedding>,
) -> Vec<ReplayItem> {
// Build spectral position map if embedding is available
let positions: HashMap<String, SpectralPosition> = if let Some(emb) = emb {
let communities = graph.communities().clone();
spectral::analyze_positions(emb, &communities)
.into_iter()
.map(|p| (p.key.clone(), p))
.collect()
} else {
HashMap::new()
};
let mut items: Vec<ReplayItem> = store.nodes.iter()
.map(|(key, node)| {
let pos = positions.get(key);
let outlier_score = pos.map(|p| p.outlier_score).unwrap_or(0.0);
let classification = pos
.map(spectral::classify_position)
.unwrap_or("unknown");
let priority = consolidation_priority(
store, key, graph,
pos.map(|p| p.outlier_score),
);
ReplayItem {
key: key.clone(),
priority,
interval_days: node.spaced_repetition_interval,
emotion: node.emotion,
cc: graph.clustering_coefficient(key),
classification,
outlier_score,
}
})
.collect();
items.sort_by(|a, b| b.priority.total_cmp(&a.priority));
items.truncate(count);
items
}
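// Illustrative usage: print the head of the queue (assumes a `store` in scope).
//
//     for item in replay_queue(&store, 10) {
//         println!("{:6.2}  {:10}  {}", item.priority, item.classification, item.key);
//     }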
/// Detect interfering memory pairs: high text similarity but different communities
pub fn detect_interference(
store: &Store,
graph: &Graph,
threshold: f32,
) -> Vec<(String, String, f32)> {
use crate::similarity;
let communities = graph.communities();
// Only compare nodes within a reasonable set — take the most active ones
let mut docs: Vec<(String, String)> = store.nodes.iter()
.filter(|(_, n)| n.content.len() > 50) // skip tiny nodes
.map(|(k, n)| (k.clone(), n.content.clone()))
.collect();
// For large stores, sample to keep pairwise comparison feasible
if docs.len() > 200 {
docs.sort_by(|a, b| b.1.len().cmp(&a.1.len()));
docs.truncate(200);
}
let similar = similarity::pairwise_similar(&docs, threshold);
// Filter to pairs in different communities
similar.into_iter()
.filter(|(a, b, _)| {
let ca = communities.get(a);
let cb = communities.get(b);
match (ca, cb) {
(Some(a), Some(b)) => a != b,
_ => true, // if community unknown, flag it
}
})
.collect()
}
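// Illustrative usage (0.5 matches the threshold consolidation_plan uses):
//
//     let graph = store.build_graph();
//     for (a, b, sim) in detect_interference(&store, &graph, 0.5) {
//         println!("possible interference ({:.2}): {} <-> {}", sim, a, b);
//     }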
/// Agent allocation from the control loop.
/// Agent types and counts are data-driven — add agents by adding
/// entries to the counts map.
#[derive(Default)]
pub struct ConsolidationPlan {
/// agent_name → run count
pub counts: std::collections::HashMap<String, usize>,
pub run_health: bool,
pub rationale: Vec<String>,
}
impl ConsolidationPlan {
pub fn count(&self, agent: &str) -> usize {
self.counts.get(agent).copied().unwrap_or(0)
}
pub fn set(&mut self, agent: &str, count: usize) {
self.counts.insert(agent.to_string(), count);
}
pub fn add(&mut self, agent: &str, count: usize) {
*self.counts.entry(agent.to_string()).or_default() += count;
}
pub fn total(&self) -> usize {
self.counts.values().sum::<usize>() + if self.run_health { 1 } else { 0 }
}
/// Expand the plan into a flat list of (agent_name, batch_size) runs.
/// Interleaves agent types so different types alternate.
pub fn to_agent_runs(&self, batch_size: usize) -> Vec<(String, usize)> {
let mut runs = Vec::new();
if self.run_health {
runs.push(("health".to_string(), 0));
}
// Sort by count descending so high-volume agents interleave well
let mut types: Vec<(&String, &usize)> = self.counts.iter()
.filter(|(_, c)| **c > 0)
.collect();
types.sort_by(|a, b| b.1.cmp(a.1));
let mut queues: Vec<Vec<(String, usize)>> = types.iter().map(|(name, count)| {
let mut q = Vec::new();
let mut remaining = **count;
while remaining > 0 {
let batch = remaining.min(batch_size);
q.push((name.to_string(), batch));
remaining -= batch;
}
q
}).collect();
// Round-robin interleave
loop {
let mut added = false;
for q in &mut queues {
if !q.is_empty() {
runs.push(q.remove(0));
added = true;
}
}
if !added { break; }
}
runs
}
}
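// Worked example of to_agent_runs (made-up counts): with run_health = true,
// counts {linker: 5, organize: 2}, and batch_size = 2, the per-type queues
// are linker [2, 2, 1] and organize [2]; round-robin interleaving yields
//   [("health", 0), ("linker", 2), ("organize", 2), ("linker", 2), ("linker", 1)]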
/// Analyze metrics and decide how much each agent needs to run.
///
/// This is the control loop: metrics → error signal → agent allocation.
/// Target values are based on healthy small-world networks.
pub fn consolidation_plan(store: &Store) -> ConsolidationPlan {
consolidation_plan_inner(store, true)
}
/// Cheap version: skip O(n²) interference detection (for daemon status).
pub fn consolidation_plan_quick(store: &Store) -> ConsolidationPlan {
consolidation_plan_inner(store, false)
}
fn consolidation_plan_inner(store: &Store, detect_interf: bool) -> ConsolidationPlan {
let graph = store.build_graph();
let alpha = graph.degree_power_law_exponent();
let gini = graph.degree_gini();
let _avg_cc = graph.avg_clustering_coefficient();
let interference_count = if detect_interf {
detect_interference(store, &graph, 0.5).len()
} else {
0
};
let episodic_count = store.nodes.iter()
.filter(|(_, n)| matches!(n.node_type, crate::store::NodeType::EpisodicSession))
.count();
let _episodic_ratio = if store.nodes.is_empty() { 0.0 }
else { episodic_count as f32 / store.nodes.len() as f32 };
let mut plan = ConsolidationPlan {
counts: std::collections::HashMap::new(),
run_health: true,
rationale: Vec::new(),
};
// Active agent types from config
let config = crate::config::get();
let agent_types: Vec<&str> = config.agent_types.iter().map(|s| s.as_str()).collect();
// Target: α ≥ 2.5 (healthy scale-free)
if alpha < 2.0 {
plan.add("linker", 100);
plan.rationale.push(format!(
"α={:.2} (target ≥2.5): extreme hub dominance → 100 linker", alpha));
} else if alpha < 2.5 {
plan.add("linker", 50);
plan.rationale.push(format!(
"α={:.2} (target ≥2.5): moderate hub dominance → 50 linker", alpha));
} else {
plan.add("linker", 20);
plan.rationale.push(format!(
"α={:.2}: healthy — 20 linker for maintenance", alpha));
}
// Target: Gini ≤ 0.4
if gini > 0.5 {
plan.add("linker", 50);
plan.rationale.push(format!(
"Gini={:.3} (target ≤0.4): high inequality → +50 linker", gini));
}
// Interference: separator disambiguates confusable nodes
if interference_count > 100 {
plan.add("separator", 10);
plan.rationale.push(format!(
"Interference: {} pairs (target <50) → 10 separator", interference_count));
} else if interference_count > 20 {
plan.add("separator", 5);
plan.rationale.push(format!(
"Interference: {} pairs → 5 separator", interference_count));
} else if interference_count > 0 {
plan.add("separator", interference_count.min(3));
}
// Organize: proportional to linker — synthesizes what linker connects
let linker = plan.count("linker");
plan.set("organize", linker / 2);
plan.rationale.push(format!(
"Organize: {} (half of linker count)", plan.count("organize")));
// Distill: core concept maintenance
let organize = plan.count("organize");
let mut distill = organize;
if gini > 0.4 { distill += 20; }
if alpha < 2.0 { distill += 20; }
plan.set("distill", distill);
plan.rationale.push(format!(
"Distill: {} (synthesize hub content)", plan.count("distill")));
// Split: handle oversized nodes
plan.set("split", 5);
// Distribute agent budget using Elo ratings
let budget = crate::config::get().agent_budget;
let elo_path = crate::config::get().data_dir.join("agent-elo.json");
if let Ok(elo_json) = std::fs::read_to_string(&elo_path) {
if let Ok(ratings) = serde_json::from_str::<std::collections::HashMap<String, f64>>(&elo_json) {
let elos: Vec<f64> = agent_types.iter()
.map(|t| ratings.get(*t).copied().unwrap_or(1000.0))
.collect();
let min_elo = elos.iter().copied().fold(f64::MAX, f64::min);
let weights: Vec<f64> = elos.iter()
.map(|e| {
let shifted = e - min_elo + 50.0;
shifted * shifted
})
.collect();
let total_weight: f64 = weights.iter().sum();
let allocate = |w: f64| -> usize {
((w / total_weight * budget as f64).round() as usize).max(2)
};
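// Worked example (made-up ratings): elos [1200.0, 1000.0, 1100.0] give
// min_elo = 1000.0, shifted values [250, 50, 150], and weights
// [62500, 2500, 22500]; with budget = 100 and total_weight = 87500 the
// allocations round to [71, 3, 26], each clamped to at least 2.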
for (i, agent) in agent_types.iter().enumerate() {
plan.set(agent, allocate(weights[i]));
}
let summary: Vec<String> = agent_types.iter()
.map(|a| format!("{}={}", a, plan.count(a)))
.collect();
plan.rationale.push(format!(
"Elo allocation (budget={}): {}", budget, summary.join(" ")));
}
} else {
// No Elo file — use budget with equal distribution
let per_type = budget / agent_types.len().max(1);
for agent in &agent_types {
plan.set(agent, per_type);
}
plan.rationale.push(format!(
"No Elo ratings — equal distribution ({} each, budget={})", per_type, budget));
}
plan
}
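// Illustrative usage: build and display the full plan. This runs the O(n²)
// interference pass; prefer consolidation_plan_quick in hot paths.
//
//     let plan = consolidation_plan(&store);
//     println!("{}", format_plan(&plan));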
/// Format the consolidation plan for display
pub fn format_plan(plan: &ConsolidationPlan) -> String {
let mut out = String::from("Consolidation Plan\n==================\n\n");
out.push_str("Analysis:\n");
for r in &plan.rationale {
out.push_str(&format!("{}\n", r));
}
out.push_str("\nAgent allocation:\n");
if plan.run_health {
out.push_str(" 1. health — system audit\n");
}
let mut step = if plan.run_health { 2 } else { 1 };
let mut sorted: Vec<_> = plan.counts.iter()
.filter(|(_, c)| **c > 0)
.collect();
sorted.sort_by(|a, b| b.1.cmp(a.1));
for (agent, count) in &sorted {
out.push_str(&format!(" {}. {} ×{}\n", step, agent, count));
step += 1;
}
out.push_str(&format!("\nTotal agent runs: {}\n", plan.total()));
out
}
/// Brief daily check: compare current metrics to last snapshot
pub fn daily_check(store: &Store) -> String {
let graph_obj = store.build_graph();
let snap = graph::current_metrics(&graph_obj);
let history = graph::load_metrics_history();
let prev = history.last();
let mut out = String::from("Memory daily check\n");
// Current state
out.push_str(&format!(" σ={:.1} α={:.2} gini={:.3} cc={:.4}\n",
snap.sigma, snap.alpha, snap.gini, snap.avg_cc));
// Trend
if let Some(p) = prev {
let d_sigma = snap.sigma - p.sigma;
let d_alpha = snap.alpha - p.alpha;
let d_gini = snap.gini - p.gini;
out.push_str(&format!(" Δσ={:+.1} Δα={:+.2} Δgini={:+.3}\n",
d_sigma, d_alpha, d_gini));
// Assessment
let mut issues = Vec::new();
if snap.alpha < 2.0 { issues.push("hub dominance critical"); }
if snap.gini > 0.5 { issues.push("high inequality"); }
if snap.avg_cc < 0.1 { issues.push("poor integration"); }
if d_sigma < -5.0 { issues.push("σ declining"); }
if d_alpha < -0.1 { issues.push("α declining"); }
if d_gini > 0.02 { issues.push("inequality increasing"); }
if issues.is_empty() {
out.push_str(" Status: healthy\n");
} else {
out.push_str(&format!(" Status: needs attention — {}\n", issues.join(", ")));
out.push_str(" Run: poc-memory consolidate-session\n");
}
} else {
out.push_str(" (first snapshot, no trend data yet)\n");
}
// Persist the snapshot
graph::save_metrics_snapshot(&snap);
out
}
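// Illustrative usage: daily_check both returns the report and persists a
// metrics snapshot, so one call per day also accumulates trend history.
//
//     println!("{}", daily_check(&store));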