// cli/admin.rs — admin subcommand handlers
use anyhow::Result;
use crate::hippocampus as memory;
use crate::hippocampus::store;

fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -> Result<()> {
    let path = data_dir.join(name);
    if !path.exists() {
        std::fs::write(&path, content)?;
        println!("Created {}", path.display());
    }
    Ok(())
}

pub async fn cmd_init() -> Result<()> {
    let cfg = crate::config::get();
    // Ensure the data directory exists
    std::fs::create_dir_all(&cfg.data_dir)?;
    // Install filesystem files (not store nodes)
    install_default_file(&cfg.data_dir, "instructions.md",
        include_str!("../../defaults/instructions.md"))?;
    install_default_file(&cfg.data_dir, "on-consciousness.md",
        include_str!("../../defaults/on-consciousness.md"))?;
    // Seed the identity node if the store doesn't have one yet
    let store = memory::access_local()?;
    if !store.contains_key("identity").unwrap_or(false) {
        let default = include_str!("../../defaults/identity.md");
        store.upsert("identity", default)?;
        println!("Seeded identity in store");
    }
    store.save()?;
    println!("Initialized with {} nodes", store.all_keys().unwrap_or_default().len());
    // Create a config file if none exists
    let config_path = std::env::var("POC_MEMORY_CONFIG")
        .map(std::path::PathBuf::from)
        .unwrap_or_else(|_| {
            dirs::home_dir().unwrap_or_default()
                .join(".consciousness/config.jsonl")
        });
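    // For example, a hypothetical override (the exact subcommand path may differ):
    //   POC_MEMORY_CONFIG=/tmp/poc-test.jsonl poc-memory init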
    if !config_path.exists() {
        let config_dir = config_path.parent().unwrap();
        std::fs::create_dir_all(config_dir)?;
        let example = include_str!("../../config.example.jsonl");
        std::fs::write(&config_path, example)?;
        println!("Created config at {} — edit with your name and context groups",
            config_path.display());
    }
    println!("Done. Run `poc-memory load-context --stats` to verify.");
    Ok(())
}

pub async fn cmd_fsck() -> Result<()> {
    // Full fsck: verify the capnp logs and compare the index against a rebuilt one
    let report = store::fsck_full()?;
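    // A sketch of the report shape as consumed below (assumption: the real
    // struct lives in hippocampus::store and may carry more fields):
    //
    //   pub struct FsckReport {
    //       pub capnp_repaired: bool, // corrupt capnp messages were truncated
    //       pub zombies: Vec<String>, // keys in the index but not in the log
    //       pub missing: Vec<String>, // keys in the log but not in the index
    //   }
    //
    //   impl FsckReport {
    //       pub fn is_clean(&self) -> bool {
    //           self.zombies.is_empty() && self.missing.is_empty()
    //       }
    //   }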
    if report.capnp_repaired {
        eprintln!("capnp log was repaired (corrupt messages truncated)");
    }
    if !report.zombies.is_empty() {
        eprintln!("\nZOMBIE entries (in index but not in log):");
        for key in &report.zombies {
            eprintln!("  {}", key);
        }
    }
    if !report.missing.is_empty() {
        eprintln!("\nMISSING entries (in log but not in index):");
        for key in &report.missing {
            eprintln!("  {}", key);
        }
    }
    if !report.is_clean() {
        eprintln!("\nTo repair: poc-memory admin repair-index");
    }
    let store = memory::access_local()?;
    // Check node-key consistency: every map key should match its node's own key
    let mut issues = 0;
    let all_keys = store.all_keys().unwrap_or_default();
    for key in &all_keys {
        if let Ok(Some(node)) = store.get_node(key) {
            if key != &node.key {
                eprintln!("MISMATCH: map key '{}' vs node.key '{}'", key, node.key);
                issues += 1;
            }
        }
    }
    // Check edge endpoints using the index
    use crate::hippocampus::store::StoreView;
    let mut dangling = 0;
    let mut orphan_edges: Vec<(String, String)> = Vec::new();
    store.for_each_relation(|source, target, _, _| {
        let s_missing = !store.contains_key(source).unwrap_or(false);
        let t_missing = !store.contains_key(target).unwrap_or(false);
        if s_missing {
            eprintln!("DANGLING: edge source '{}'", source);
            dangling += 1;
        }
        if t_missing {
            eprintln!("DANGLING: edge target '{}'", target);
            dangling += 1;
        }
        if s_missing || t_missing {
            orphan_edges.push((source.to_string(), target.to_string()));
        }
    });
    // Report orphan edges
    if !orphan_edges.is_empty() {
        let count = orphan_edges.len();
        for (source, target) in &orphan_edges {
            // set_link_strength with 0 would delete, but we don't have that.
            // For now just report - full cleanup requires more work
            eprintln!("Would prune: {} -> {}", source, target);
        }
        eprintln!("Found {} orphan edges (prune not yet implemented for index)", count);
    }
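    // A sketch of what pruning could look like here (hypothetical: it would
    // need key-to-uuid resolution first, then the same tombstone pattern
    // cmd_dedup uses below: drop the index entry, then log a deleted relation):
    //
    //   store.remove_relation_from_index(&src_uuid, &tgt_uuid, strength, rel_type)?;
    //   let mut tombstone = store::new_relation(src_uuid, tgt_uuid,
    //       store::RelationType::from_u8(rel_type), strength, source, target, "system");
    //   tombstone.deleted = true;
    //   store.append_relations(&[tombstone])?;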
    let g = store.build_graph();
    println!("fsck: {} nodes, {} edges, {} issues, {} dangling",
        all_keys.len(), g.edge_count(), issues, dangling);
    Ok(())
}

pub async fn cmd_repair_index() -> Result<()> {
    store::repair_index()?;
    println!("Index repaired successfully.");
    Ok(())
}

pub async fn cmd_dedup(apply: bool) -> Result<()> {
    use std::collections::HashMap;
    let store = memory::access_local()?;
    let duplicates = store.find_duplicates()?;
    if duplicates.is_empty() {
        println!("No duplicate keys found.");
        return Ok(());
    }
    // Count edges per key (we'll map keys to UUIDs below)
    use crate::hippocampus::store::StoreView;
    let mut edges_by_key: HashMap<String, usize> = HashMap::new();
    store.for_each_relation(|source, target, _, _| {
        *edges_by_key.entry(source.to_string()).or_default() += 1;
        *edges_by_key.entry(target.to_string()).or_default() += 1;
    });
    // Convert to edges_by_uuid for compatibility
    let mut edges_by_uuid: HashMap<[u8; 16], usize> = HashMap::new();
    for (key, count) in &edges_by_key {
        if let Ok(Some(node)) = store.get_node(key) {
            edges_by_uuid.insert(node.uuid, *count);
        }
    }
    let mut identical_groups = Vec::new();
    let mut diverged_groups = Vec::new();
    for (key, mut nodes) in duplicates {
        // Sort by version descending so the highest-version copy is first
        nodes.sort_by(|a, b| b.version.cmp(&a.version));
        // Check whether all copies have identical content
        let all_same = nodes.windows(2).all(|w| w[0].content == w[1].content);
        let info: Vec<_> = nodes.iter().map(|n| {
            let edge_count = edges_by_uuid.get(&n.uuid).copied().unwrap_or(0);
            (n.clone(), edge_count)
        }).collect();
        if all_same {
            identical_groups.push((key, info));
        } else {
            diverged_groups.push((key, info));
        }
    }
    // Report
    println!("=== Duplicate key report ===\n");
    println!("{} identical groups, {} diverged groups\n",
        identical_groups.len(), diverged_groups.len());
    if !identical_groups.is_empty() {
        println!("── Identical (safe to auto-merge) ──");
        for (key, copies) in &identical_groups {
            let total_edges: usize = copies.iter().map(|c| c.1).sum();
            println!("  {} ({} copies, {} total edges)", key, copies.len(), total_edges);
            for (node, edges) in copies {
                let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
                println!("    v{} uuid={}.. edges={}", node.version, &uuid_hex[..8], edges);
            }
        }
        println!();
    }
    if !diverged_groups.is_empty() {
        println!("── Diverged (need review) ──");
        for (key, copies) in &diverged_groups {
            let total_edges: usize = copies.iter().map(|c| c.1).sum();
            println!("  {} ({} copies, {} total edges)", key, copies.len(), total_edges);
            for (node, edges) in copies {
                let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
                let preview: String = node.content.chars().take(80).collect();
                println!("    v{} uuid={}.. edges={} | {}{}",
                    node.version, &uuid_hex[..8], edges, preview,
                    if node.content.chars().count() > 80 { "..." } else { "" });
            }
        }
        println!();
    }
    if !apply {
        let total_dupes: usize = identical_groups.iter().chain(diverged_groups.iter())
            .map(|(_, copies)| copies.len() - 1)
            .sum();
        println!("Dry run: {} duplicate nodes would be merged. Use --apply to execute.", total_dupes);
        return Ok(());
    }
    // Merge all groups: identical + diverged.
    // For diverged: keep the copy with the most edges (it's the one that got
    // woven into the graph — the version that lived). Fall back to highest version.
    let all_groups: Vec<_> = identical_groups.into_iter()
        .chain(diverged_groups)
        .collect();
    // Build a uuid → key map for relation key strings
    let mut uuid_to_key: HashMap<[u8; 16], String> = HashMap::new();
    for key in store.all_keys()? {
        if let Ok(Some(node)) = store.get_node(&key) {
            uuid_to_key.insert(node.uuid, key);
        }
    }
    let mut merged = 0usize;
    let mut edges_redirected = 0usize;
    let mut edges_deduped = 0usize;
    for (_key, mut copies) in all_groups {
        // Pick the survivor: most edges first, then highest version
        copies.sort_by(|a, b| b.1.cmp(&a.1).then(b.0.version.cmp(&a.0.version)));
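        // e.g. copies [(v3, 0 edges), (v1, 5 edges)] sorts to [(v1, 5), (v3, 0)]:
        // the well-connected v1 survives; version only breaks edge-count ties.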
        let survivor_uuid = copies[0].0.uuid;
        let survivor_key = uuid_to_key.get(&survivor_uuid).cloned().unwrap_or_default();
        let doomed_uuids: Vec<[u8; 16]> = copies[1..].iter().map(|c| c.0.uuid).collect();
        // Redirect edges from doomed UUIDs to the survivor via index iteration
        for doomed_uuid in &doomed_uuids {
            let edges = store.edges_for_uuid(doomed_uuid)?;
            for (other_uuid, strength, rel_type, is_outgoing) in edges {
                let other_key = uuid_to_key.get(&other_uuid).cloned().unwrap_or_default();
                // Remove the old edge from the index
                let (old_src, old_tgt) = if is_outgoing {
                    (*doomed_uuid, other_uuid)
                } else {
                    (other_uuid, *doomed_uuid)
                };
                store.remove_relation_from_index(&old_src, &old_tgt, strength, rel_type)?;
                // Add the redirected edge
                let (new_src, new_tgt, src_key, tgt_key) = if is_outgoing {
                    (survivor_uuid, other_uuid, survivor_key.clone(), other_key)
                } else {
                    (other_uuid, survivor_uuid, other_key, survivor_key.clone())
                };
                store.index_relation(&new_src, &new_tgt, strength, rel_type)?;
                // Append a tombstone for the old relation plus the new one to the log
                let mut tombstone = store::new_relation(
                    old_src, old_tgt,
                    store::RelationType::from_u8(rel_type), strength,
                    &uuid_to_key.get(&old_src).cloned().unwrap_or_default(),
                    &uuid_to_key.get(&old_tgt).cloned().unwrap_or_default(),
                    "system",
                );
                tombstone.deleted = true;
                tombstone.version = 2;
                let mut redirected = store::new_relation(
                    new_src, new_tgt,
                    store::RelationType::from_u8(rel_type), strength,
                    &src_key, &tgt_key,
                    "system",
                );
                redirected.version = 2;
                store.append_relations(&[tombstone, redirected])?;
                edges_redirected += 1;
            }
        }
        // Dedup edges: same (other_uuid, rel_type) → keep the highest strength.
        // Group edges by (other, type), sort each group by strength desc, tombstone the extras
        let edges = store.edges_for_uuid(&survivor_uuid)?;
        let mut by_endpoint: HashMap<([u8; 16], u8), Vec<(f32, bool)>> = HashMap::new();
        for (other_uuid, strength, rel_type, is_outgoing) in edges {
            by_endpoint.entry((other_uuid, rel_type))
                .or_default()
                .push((strength, is_outgoing));
        }
        for ((other_uuid, rel_type), mut variants) in by_endpoint {
            if variants.len() <= 1 { continue; }
            // Sort by strength descending, keep the first
            variants.sort_by(|a, b| b.0.total_cmp(&a.0));
            let other_key = uuid_to_key.get(&other_uuid).cloned().unwrap_or_default();
            for (strength, is_outgoing) in variants.into_iter().skip(1) {
                let (src, tgt, src_key, tgt_key) = if is_outgoing {
                    (survivor_uuid, other_uuid, survivor_key.clone(), other_key.clone())
                } else {
                    (other_uuid, survivor_uuid, other_key.clone(), survivor_key.clone())
                };
                store.remove_relation_from_index(&src, &tgt, strength, rel_type)?;
                let mut tombstone = store::new_relation(
                    src, tgt,
                    store::RelationType::from_u8(rel_type), strength,
                    &src_key, &tgt_key,
                    "system",
                );
                tombstone.deleted = true;
                tombstone.version = 2;
                store.append_relations(&[tombstone])?;
                edges_deduped += 1;
            }
        }
        // Tombstone the doomed nodes
        let mut tombstones = Vec::new();
        for (doomed_node, _) in &copies[1..] {
            let mut t = doomed_node.clone();
            t.deleted = true;
            t.version += 1;
            tombstones.push(t);
        }
        store.append_nodes(&tombstones)?;
        // Remove the doomed nodes from the index
        for (doomed_node, _) in &copies[1..] {
            store.remove_from_index(&doomed_node.key)?;
        }
        merged += doomed_uuids.len();
    }
    store.save()?;
    println!("Merged {} duplicates, redirected {} edges, deduped {} duplicate edges",
        merged, edges_redirected, edges_deduped);
    Ok(())
}

pub async fn cmd_health() -> Result<()> {
    let result = memory::graph_health(None).await?;
    print!("{}", result);
    Ok(())
}

pub async fn cmd_topology() -> Result<()> {
    let result = memory::graph_topology(None).await?;
    print!("{}", result);
    Ok(())
}

pub async fn cmd_daily_check() -> Result<()> {
    let store = memory::access_local()?;
    let report = crate::neuro::daily_check(&store);
    print!("{}", report);
    Ok(())
}

pub async fn cmd_status() -> Result<()> {
    // Status currently mirrors the topology report
    let result = memory::graph_topology(None).await?;
    print!("{}", result);
    Ok(())
}
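
// A minimal sanity check for install_default_file's idempotence (a sketch:
// std-only, writes under the OS temp dir; not wired to any fixture harness).
#[cfg(test)]
mod tests {
    use super::install_default_file;

    #[test]
    fn install_default_file_does_not_overwrite() {
        let dir = std::env::temp_dir().join("admin-install-default-test");
        std::fs::create_dir_all(&dir).unwrap();
        let path = dir.join("probe.md");
        let _ = std::fs::remove_file(&path);

        // The first call creates the file...
        install_default_file(&dir, "probe.md", "first").unwrap();
        // ...and a second call must leave the existing content untouched.
        install_default_file(&dir, "probe.md", "second").unwrap();
        assert_eq!(std::fs::read_to_string(&path).unwrap(), "first");

        let _ = std::fs::remove_file(&path);
    }
}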