// cli/admin.rs — admin subcommand handlers

use crate::store;

fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -> Result<(), String> {
    let path = data_dir.join(name);
    if !path.exists() {
        std::fs::write(&path, content)
            .map_err(|e| format!("write {}: {}", name, e))?;
        println!("Created {}", path.display());
    }
    Ok(())
}

pub fn cmd_init() -> Result<(), String> {
    let cfg = crate::config::get();

    // Ensure data directory exists
    std::fs::create_dir_all(&cfg.data_dir)
        .map_err(|e| format!("create data_dir: {}", e))?;

    // Install filesystem files (not store nodes)
    install_default_file(&cfg.data_dir, "instructions.md",
        include_str!("../../defaults/instructions.md"))?;
    install_default_file(&cfg.data_dir, "on-consciousness.md",
        include_str!("../../defaults/on-consciousness.md"))?;

    // Initialize store and seed default identity node if empty
    let mut store = store::Store::load()?;
    let count = store.init_from_markdown()?;
    for key in &cfg.core_nodes {
        if !store.nodes.contains_key(key) && key == "identity" {
            let default = include_str!("../../defaults/identity.md");
            store.upsert(key, default)
                .map_err(|e| format!("seed {}: {}", key, e))?;
            println!("Seeded {} in store", key);
        }
    }
    store.save()?;
    println!("Indexed {} memory units", count);

    // Install hooks
    crate::claude::hook::install_hook()?;

    // Create config if none exists
    let config_path = std::env::var("POC_MEMORY_CONFIG")
        .map(std::path::PathBuf::from)
        .unwrap_or_else(|_| {
            dirs::home_dir().unwrap_or_default()
                .join(".consciousness/config.jsonl")
        });
    if !config_path.exists() {
        let config_dir = config_path.parent().unwrap();
        std::fs::create_dir_all(config_dir)
            .map_err(|e| format!("create config dir: {}", e))?;
        let example = include_str!("../../config.example.jsonl");
        std::fs::write(&config_path, example)
            .map_err(|e| format!("write config: {}", e))?;
        println!("Created config at {} — edit with your name and context groups", config_path.display());
    }

    println!("Done. Run `poc-memory load-context --stats` to verify.");
    Ok(())
}

pub fn cmd_bulk_rename(from: &str, to: &str, apply: bool) -> Result<(), String> {
    let mut store = store::Store::load()?;

    // Find all keys that need renaming
    let renames: Vec<(String, String)> = store.nodes.keys()
        .filter(|k| k.contains(from))
        .map(|k| (k.clone(), k.replace(from, to)))
        .collect();

    // Check for collisions
    let existing: std::collections::HashSet<&String> = store.nodes.keys().collect();
    let mut collisions = 0;
    for (old, new) in &renames {
        if existing.contains(new) && old != new {
            eprintln!("COLLISION: {} -> {} (target exists)", old, new);
            collisions += 1;
        }
    }

    println!("Bulk rename '{}' -> '{}'", from, to);
    println!(" Keys to rename: {}", renames.len());
    println!(" Collisions: {}", collisions);

    if collisions > 0 {
        return Err(format!("{} collisions — aborting", collisions));
    }

    if !apply {
        // Show a sample
        for (old, new) in renames.iter().take(10) {
            println!(" {} -> {}", old, new);
        }
        if renames.len() > 10 {
            println!(" ... and {} more", renames.len() - 10);
        }
        println!("\nDry run. Use --apply to execute.");
        return Ok(());
    }

    // Apply renames using rename_node() which properly appends to capnp logs.
    // Process in batches to avoid holding the lock too long.
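    // Assumption: rename_node() also updates the in-memory cache to match what it
    // appends to the log; the fsck pass at the end of this command is what catches
    // any drift between the two.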
    let mut renamed_count = 0;
    let mut errors = 0;
    let total = renames.len();
    for (i, (old_key, new_key)) in renames.iter().enumerate() {
        match store.rename_node(old_key, new_key) {
            Ok(()) => renamed_count += 1,
            Err(e) => {
                eprintln!(" RENAME ERROR: {} -> {}: {}", old_key, new_key, e);
                errors += 1;
            }
        }
        if (i + 1) % 1000 == 0 {
            println!(" {}/{} ({} errors)", i + 1, total, errors);
        }
    }
    store.save()?;
    println!("Renamed {} nodes ({} errors).", renamed_count, errors);

    // Run fsck to verify
    println!("\nRunning fsck...");
    drop(store);
    cmd_fsck()?;
    Ok(())
}

pub fn cmd_fsck() -> Result<(), String> {
    let mut store = store::Store::load()?;

    // Check cache vs log consistency
    let log_store = store::Store::load_from_logs()?;
    let mut cache_issues = 0;

    // Nodes in logs but missing from cache
    for key in log_store.nodes.keys() {
        if !store.nodes.contains_key(key) {
            eprintln!("CACHE MISSING: '{}' exists in capnp log but not in cache", key);
            cache_issues += 1;
        }
    }

    // Nodes in cache but not in logs (phantom nodes)
    for key in store.nodes.keys() {
        if !log_store.nodes.contains_key(key) {
            eprintln!("CACHE PHANTOM: '{}' exists in cache but not in capnp log", key);
            cache_issues += 1;
        }
    }

    // Version mismatches
    for (key, log_node) in &log_store.nodes {
        if let Some(cache_node) = store.nodes.get(key)
            && cache_node.version != log_node.version
        {
            eprintln!("CACHE STALE: '{}' cache v{} vs log v{}",
                key, cache_node.version, log_node.version);
            cache_issues += 1;
        }
    }

    if cache_issues > 0 {
        eprintln!("{} cache inconsistencies found — rebuilding from logs", cache_issues);
        store = log_store;
        store.save().map_err(|e| format!("rebuild save: {}", e))?;
    }

    // Check node-key consistency
    let mut issues = 0;
    for (key, node) in &store.nodes {
        if key != &node.key {
            eprintln!("MISMATCH: map key '{}' vs node.key '{}'", key, node.key);
            issues += 1;
        }
    }

    // Check edge endpoints
    let mut dangling = 0;
    for rel in &store.relations {
        if rel.deleted { continue; }
        if !store.nodes.contains_key(&rel.source_key) {
            eprintln!("DANGLING: edge source '{}'", rel.source_key);
            dangling += 1;
        }
        if !store.nodes.contains_key(&rel.target_key) {
            eprintln!("DANGLING: edge target '{}'", rel.target_key);
            dangling += 1;
        }
    }

    // Prune orphan edges
    let mut to_tombstone = Vec::new();
    for rel in &store.relations {
        if rel.deleted { continue; }
        if !store.nodes.contains_key(&rel.source_key) || !store.nodes.contains_key(&rel.target_key) {
            let mut tombstone = rel.clone();
            tombstone.deleted = true;
            tombstone.version += 1;
            to_tombstone.push(tombstone);
        }
    }
    if !to_tombstone.is_empty() {
        let count = to_tombstone.len();
        store.append_relations(&to_tombstone)?;
        for t in &to_tombstone {
            if let Some(r) = store.relations.iter_mut().find(|r| r.uuid == t.uuid) {
                r.deleted = true;
                r.version = t.version;
            }
        }
        store.save()?;
        eprintln!("Pruned {} orphan edges", count);
    }

    let g = store.build_graph();
    println!("fsck: {} nodes, {} edges, {} issues, {} dangling, {} cache",
        store.nodes.len(), g.edge_count(), issues, dangling, cache_issues);
    Ok(())
}

pub fn cmd_dedup(apply: bool) -> Result<(), String> {
    use std::collections::{HashMap, HashSet};

    let mut store = store::Store::load()?;
    let duplicates = store.find_duplicates()?;
    if duplicates.is_empty() {
        println!("No duplicate keys found.");
        return Ok(());
    }

    // Count edges per UUID
    let mut edges_by_uuid: HashMap<[u8; 16], usize> = HashMap::new();
    for rel in &store.relations {
        if rel.deleted { continue; }
        *edges_by_uuid.entry(rel.source).or_default() += 1;
        *edges_by_uuid.entry(rel.target).or_default() += 1;
    }

    let mut identical_groups = Vec::new();
    let mut diverged_groups = Vec::new();
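    // Split duplicate groups into "identical" (every copy has the same content and
    // can be merged automatically) and "diverged" (contents differ, so the merge
    // step has to pick a survivor).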
    for (key, mut nodes) in duplicates {
        // Sort by version descending so highest-version is first
        nodes.sort_by(|a, b| b.version.cmp(&a.version));

        // Check if all copies have identical content
        let all_same = nodes.windows(2).all(|w| w[0].content == w[1].content);
        let info: Vec<_> = nodes.iter().map(|n| {
            let edge_count = edges_by_uuid.get(&n.uuid).copied().unwrap_or(0);
            (n.clone(), edge_count)
        }).collect();

        if all_same {
            identical_groups.push((key, info));
        } else {
            diverged_groups.push((key, info));
        }
    }

    // Report
    println!("=== Duplicate key report ===\n");
    println!("{} identical groups, {} diverged groups\n",
        identical_groups.len(), diverged_groups.len());

    if !identical_groups.is_empty() {
        println!("── Identical (safe to auto-merge) ──");
        for (key, copies) in &identical_groups {
            let total_edges: usize = copies.iter().map(|c| c.1).sum();
            println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
            for (node, edges) in copies {
                let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
                println!(" v{} uuid={}.. edges={}", node.version, &uuid_hex[..8], edges);
            }
        }
        println!();
    }

    if !diverged_groups.is_empty() {
        println!("── Diverged (need review) ──");
        for (key, copies) in &diverged_groups {
            let total_edges: usize = copies.iter().map(|c| c.1).sum();
            println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
            for (node, edges) in copies {
                let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
                let preview: String = node.content.chars().take(80).collect();
                println!(" v{} uuid={}.. edges={} | {}{}",
                    node.version, &uuid_hex[..8], edges, preview,
                    if node.content.chars().count() > 80 { "..." } else { "" });
            }
        }
        println!();
    }

    if !apply {
        let total_dupes: usize = identical_groups.iter().chain(diverged_groups.iter())
            .map(|(_, copies)| copies.len() - 1)
            .sum();
        println!("Dry run: {} duplicate nodes would be merged. Use --apply to execute.", total_dupes);
        return Ok(());
    }

    // Merge all groups: identical + diverged
    // For diverged: keep the copy with most edges (it's the one that got
    // woven into the graph — the version that lived). Fall back to highest version.
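    // Identical groups take the same path: since every copy has the same content,
    // whichever copy wins the edge-count sort is an acceptable survivor.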
    let all_groups: Vec<_> = identical_groups.into_iter()
        .chain(diverged_groups)
        .collect();

    let mut merged = 0usize;
    let mut edges_redirected = 0usize;
    let mut edges_deduped = 0usize;

    for (_key, mut copies) in all_groups {
        // Pick survivor: most edges first, then highest version
        copies.sort_by(|a, b| b.1.cmp(&a.1).then(b.0.version.cmp(&a.0.version)));
        let survivor_uuid = copies[0].0.uuid;
        let doomed_uuids: Vec<[u8; 16]> = copies[1..].iter().map(|c| c.0.uuid).collect();

        // Redirect edges from doomed UUIDs to survivor
        let mut updated_rels = Vec::new();
        for rel in &mut store.relations {
            if rel.deleted { continue; }
            let mut changed = false;
            if doomed_uuids.contains(&rel.source) {
                rel.source = survivor_uuid;
                changed = true;
            }
            if doomed_uuids.contains(&rel.target) {
                rel.target = survivor_uuid;
                changed = true;
            }
            if changed {
                rel.version += 1;
                updated_rels.push(rel.clone());
                edges_redirected += 1;
            }
        }

        // Dedup edges: same (source, target, rel_type) → keep highest strength
        let mut seen: HashSet<([u8; 16], [u8; 16], String)> = HashSet::new();
        let mut to_tombstone_rels = Vec::new();
        // Sort by strength descending so we keep the strongest
        let mut rels_with_idx: Vec<(usize, &store::Relation)> = store.relations.iter()
            .enumerate()
            .filter(|(_, r)| !r.deleted && (r.source == survivor_uuid || r.target == survivor_uuid))
            .collect();
        rels_with_idx.sort_by(|a, b| b.1.strength.total_cmp(&a.1.strength));
        for (idx, rel) in &rels_with_idx {
            let edge_key = (rel.source, rel.target, format!("{:?}", rel.rel_type));
            if !seen.insert(edge_key) {
                to_tombstone_rels.push(*idx);
                edges_deduped += 1;
            }
        }
        for &idx in &to_tombstone_rels {
            store.relations[idx].deleted = true;
            store.relations[idx].version += 1;
            updated_rels.push(store.relations[idx].clone());
        }

        // Tombstone doomed nodes
        let mut tombstones = Vec::new();
        for (doomed_node, _) in &copies[1..] {
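            // Each non-survivor copy becomes a tombstone: cloned, marked deleted,
            // and version-bumped, then appended to the node log below.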
            let mut t = doomed_node.clone();
            t.deleted = true;
            t.version += 1;
            tombstones.push(t);
        }
        store.append_nodes(&tombstones)?;
        if !updated_rels.is_empty() {
            store.append_relations(&updated_rels)?;
        }
        for uuid in &doomed_uuids {
            store.uuid_to_key.remove(uuid);
        }
        merged += doomed_uuids.len();
    }

    // Remove tombstoned relations from cache
    store.relations.retain(|r| !r.deleted);
    store.save()?;

    println!("Merged {} duplicates, redirected {} edges, deduped {} duplicate edges",
        merged, edges_redirected, edges_deduped);
    Ok(())
}

pub fn cmd_health() -> Result<(), String> {
    let store = store::Store::load()?;
    let g = store.build_graph();
    let report = crate::graph::health_report(&g, &store);
    print!("{}", report);
    Ok(())
}

pub fn cmd_daily_check() -> Result<(), String> {
    let store = store::Store::load()?;
    let report = crate::neuro::daily_check(&store);
    print!("{}", report);
    Ok(())
}

pub fn cmd_import(files: &[String]) -> Result<(), String> {
    if files.is_empty() {
        return Err("import requires at least one file path".into());
    }
    let mut store = store::Store::load()?;
    let mut total_new = 0;
    let mut total_updated = 0;
    for arg in files {
        let path = std::path::PathBuf::from(arg);
        let resolved = if path.exists() {
            path
        } else {
            let mem_path = store::memory_dir().join(arg);
            if !mem_path.exists() {
                eprintln!("File not found: {}", arg);
                continue;
            }
            mem_path
        };
        let (n, u) = store.import_file(&resolved)?;
        total_new += n;
        total_updated += u;
    }
    if total_new > 0 || total_updated > 0 {
        store.save()?;
    }
    println!("Import: {} new, {} updated", total_new, total_updated);
    Ok(())
}

pub fn cmd_export(files: &[String], export_all: bool) -> Result<(), String> {
    let store = store::Store::load()?;
    let targets: Vec<String> = if export_all {
        let mut files: Vec<String> = store.nodes.keys()
            .filter(|k| !k.contains('#'))
            .cloned()
            .collect();
        files.sort();
        files
    } else if files.is_empty() {
        return Err("export requires file keys or --all".into());
    } else {
        files.iter().map(|a| {
            a.strip_suffix(".md").unwrap_or(a).to_string()
        }).collect()
    };

    let mem_dir = store::memory_dir();
    for file_key in &targets {
        match store.export_to_markdown(file_key) {
            Some(content) => {
                let out_path = mem_dir.join(format!("{}.md", file_key));
                std::fs::write(&out_path, &content)
                    .map_err(|e| format!("write {}: {}", out_path.display(), e))?;
                let section_count = content.matches("