cli: extract agent and admin commands from main.rs
Move agent handlers (consolidate, replay, digest, experience-mine, fact-mine, knowledge-loop, apply-*) into cli/agent.rs. Move admin handlers (init, fsck, dedup, bulk-rename, health, daily-check, import, export) into cli/admin.rs. Functions tightly coupled to Clap types (cmd_daemon, cmd_digest, cmd_apply_agent, cmd_experience_mine) remain in main.rs. main.rs: 3130 → 1586 lines (49% reduction). Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
aa2fddf137
commit
f423cf22df
4 changed files with 745 additions and 722 deletions
486
poc-memory/src/cli/admin.rs
Normal file
486
poc-memory/src/cli/admin.rs
Normal file
|
|
@ -0,0 +1,486 @@
|
||||||
|
// cli/admin.rs — admin subcommand handlers
|
||||||
|
|
||||||
|
use crate::store;
|
||||||
|
/// Install a default file at `data_dir/name` unless it already exists.
/// Existing files are never overwritten, so user edits survive a re-init.
fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -> Result<(), String> {
    let path = data_dir.join(name);
    if path.exists() {
        // Nothing to do — keep the user's copy.
        return Ok(());
    }
    std::fs::write(&path, content)
        .map_err(|e| format!("write {}: {}", name, e))?;
    println!("Created {}", path.display());
    Ok(())
}
|
||||||
|
|
||||||
|
use crate::store::StoreView;
|
||||||
|
|
||||||
|
pub fn cmd_init() -> Result<(), String> {
|
||||||
|
let cfg = crate::config::get();
|
||||||
|
|
||||||
|
// Ensure data directory exists
|
||||||
|
std::fs::create_dir_all(&cfg.data_dir)
|
||||||
|
.map_err(|e| format!("create data_dir: {}", e))?;
|
||||||
|
|
||||||
|
// Install filesystem files (not store nodes)
|
||||||
|
install_default_file(&cfg.data_dir, "instructions.md",
|
||||||
|
include_str!("../../defaults/instructions.md"))?;
|
||||||
|
install_default_file(&cfg.data_dir, "on-consciousness.md",
|
||||||
|
include_str!("../../defaults/on-consciousness.md"))?;
|
||||||
|
|
||||||
|
// Initialize store and seed default identity node if empty
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let count = store.init_from_markdown()?;
|
||||||
|
for key in &cfg.core_nodes {
|
||||||
|
if !store.nodes.contains_key(key) && key == "identity" {
|
||||||
|
let default = include_str!("../../defaults/identity.md");
|
||||||
|
store.upsert(key, default)
|
||||||
|
.map_err(|e| format!("seed {}: {}", key, e))?;
|
||||||
|
println!("Seeded {} in store", key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
store.save()?;
|
||||||
|
println!("Indexed {} memory units", count);
|
||||||
|
|
||||||
|
// Install hooks
|
||||||
|
crate::daemon::install_hook()?;
|
||||||
|
|
||||||
|
// Create config if none exists
|
||||||
|
let config_path = std::env::var("POC_MEMORY_CONFIG")
|
||||||
|
.map(std::path::PathBuf::from)
|
||||||
|
.unwrap_or_else(|_| {
|
||||||
|
std::path::PathBuf::from(std::env::var("HOME").unwrap())
|
||||||
|
.join(".config/poc-memory/config.jsonl")
|
||||||
|
});
|
||||||
|
if !config_path.exists() {
|
||||||
|
let config_dir = config_path.parent().unwrap();
|
||||||
|
std::fs::create_dir_all(config_dir)
|
||||||
|
.map_err(|e| format!("create config dir: {}", e))?;
|
||||||
|
let example = include_str!("../../config.example.jsonl");
|
||||||
|
std::fs::write(&config_path, example)
|
||||||
|
.map_err(|e| format!("write config: {}", e))?;
|
||||||
|
println!("Created config at {} — edit with your name and context groups",
|
||||||
|
config_path.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("Done. Run `poc-memory load-context --stats` to verify.");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_bulk_rename(from: &str, to: &str, apply: bool) -> Result<(), String> {
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
|
||||||
|
// Find all keys that need renaming
|
||||||
|
let renames: Vec<(String, String)> = store.nodes.keys()
|
||||||
|
.filter(|k| k.contains(from))
|
||||||
|
.map(|k| (k.clone(), k.replace(from, to)))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Check for collisions
|
||||||
|
let existing: std::collections::HashSet<&String> = store.nodes.keys().collect();
|
||||||
|
let mut collisions = 0;
|
||||||
|
for (old, new) in &renames {
|
||||||
|
if existing.contains(new) && old != new {
|
||||||
|
eprintln!("COLLISION: {} -> {} (target exists)", old, new);
|
||||||
|
collisions += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("Bulk rename '{}' -> '{}'", from, to);
|
||||||
|
println!(" Keys to rename: {}", renames.len());
|
||||||
|
println!(" Collisions: {}", collisions);
|
||||||
|
|
||||||
|
if collisions > 0 {
|
||||||
|
return Err(format!("{} collisions — aborting", collisions));
|
||||||
|
}
|
||||||
|
|
||||||
|
if !apply {
|
||||||
|
// Show a sample
|
||||||
|
for (old, new) in renames.iter().take(10) {
|
||||||
|
println!(" {} -> {}", old, new);
|
||||||
|
}
|
||||||
|
if renames.len() > 10 {
|
||||||
|
println!(" ... and {} more", renames.len() - 10);
|
||||||
|
}
|
||||||
|
println!("\nDry run. Use --apply to execute.");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply renames using rename_node() which properly appends to capnp logs.
|
||||||
|
// Process in batches to avoid holding the lock too long.
|
||||||
|
let mut renamed_count = 0;
|
||||||
|
let mut errors = 0;
|
||||||
|
let total = renames.len();
|
||||||
|
for (i, (old_key, new_key)) in renames.iter().enumerate() {
|
||||||
|
match store.rename_node(old_key, new_key) {
|
||||||
|
Ok(()) => renamed_count += 1,
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!(" RENAME ERROR: {} -> {}: {}", old_key, new_key, e);
|
||||||
|
errors += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (i + 1) % 1000 == 0 {
|
||||||
|
println!(" {}/{} ({} errors)", i + 1, total, errors);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
store.save()?;
|
||||||
|
println!("Renamed {} nodes ({} errors).", renamed_count, errors);
|
||||||
|
|
||||||
|
// Run fsck to verify
|
||||||
|
println!("\nRunning fsck...");
|
||||||
|
drop(store);
|
||||||
|
cmd_fsck()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_fsck() -> Result<(), String> {
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
|
||||||
|
// Check cache vs log consistency
|
||||||
|
let log_store = store::Store::load_from_logs()?;
|
||||||
|
let mut cache_issues = 0;
|
||||||
|
|
||||||
|
// Nodes in logs but missing from cache
|
||||||
|
for key in log_store.nodes.keys() {
|
||||||
|
if !store.nodes.contains_key(key) {
|
||||||
|
eprintln!("CACHE MISSING: '{}' exists in capnp log but not in cache", key);
|
||||||
|
cache_issues += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Nodes in cache but not in logs (phantom nodes)
|
||||||
|
for key in store.nodes.keys() {
|
||||||
|
if !log_store.nodes.contains_key(key) {
|
||||||
|
eprintln!("CACHE PHANTOM: '{}' exists in cache but not in capnp log", key);
|
||||||
|
cache_issues += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Version mismatches
|
||||||
|
for (key, log_node) in &log_store.nodes {
|
||||||
|
if let Some(cache_node) = store.nodes.get(key) {
|
||||||
|
if cache_node.version != log_node.version {
|
||||||
|
eprintln!("CACHE STALE: '{}' cache v{} vs log v{}",
|
||||||
|
key, cache_node.version, log_node.version);
|
||||||
|
cache_issues += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache_issues > 0 {
|
||||||
|
eprintln!("{} cache inconsistencies found — rebuilding from logs", cache_issues);
|
||||||
|
store = log_store;
|
||||||
|
store.save().map_err(|e| format!("rebuild save: {}", e))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check node-key consistency
|
||||||
|
let mut issues = 0;
|
||||||
|
for (key, node) in &store.nodes {
|
||||||
|
if key != &node.key {
|
||||||
|
eprintln!("MISMATCH: map key '{}' vs node.key '{}'", key, node.key);
|
||||||
|
issues += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check edge endpoints
|
||||||
|
let mut dangling = 0;
|
||||||
|
for rel in &store.relations {
|
||||||
|
if rel.deleted { continue; }
|
||||||
|
if !store.nodes.contains_key(&rel.source_key) {
|
||||||
|
eprintln!("DANGLING: edge source '{}'", rel.source_key);
|
||||||
|
dangling += 1;
|
||||||
|
}
|
||||||
|
if !store.nodes.contains_key(&rel.target_key) {
|
||||||
|
eprintln!("DANGLING: edge target '{}'", rel.target_key);
|
||||||
|
dangling += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prune orphan edges
|
||||||
|
let mut to_tombstone = Vec::new();
|
||||||
|
for rel in &store.relations {
|
||||||
|
if rel.deleted { continue; }
|
||||||
|
if !store.nodes.contains_key(&rel.source_key)
|
||||||
|
|| !store.nodes.contains_key(&rel.target_key) {
|
||||||
|
let mut tombstone = rel.clone();
|
||||||
|
tombstone.deleted = true;
|
||||||
|
tombstone.version += 1;
|
||||||
|
to_tombstone.push(tombstone);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !to_tombstone.is_empty() {
|
||||||
|
let count = to_tombstone.len();
|
||||||
|
store.append_relations(&to_tombstone)?;
|
||||||
|
for t in &to_tombstone {
|
||||||
|
if let Some(r) = store.relations.iter_mut().find(|r| r.uuid == t.uuid) {
|
||||||
|
r.deleted = true;
|
||||||
|
r.version = t.version;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
store.save()?;
|
||||||
|
eprintln!("Pruned {} orphan edges", count);
|
||||||
|
}
|
||||||
|
|
||||||
|
let g = store.build_graph();
|
||||||
|
println!("fsck: {} nodes, {} edges, {} issues, {} dangling, {} cache",
|
||||||
|
store.nodes.len(), g.edge_count(), issues, dangling, cache_issues);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_dedup(apply: bool) -> Result<(), String> {
|
||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let duplicates = store.find_duplicates()?;
|
||||||
|
|
||||||
|
if duplicates.is_empty() {
|
||||||
|
println!("No duplicate keys found.");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count edges per UUID
|
||||||
|
let mut edges_by_uuid: HashMap<[u8; 16], usize> = HashMap::new();
|
||||||
|
for rel in &store.relations {
|
||||||
|
if rel.deleted { continue; }
|
||||||
|
*edges_by_uuid.entry(rel.source).or_default() += 1;
|
||||||
|
*edges_by_uuid.entry(rel.target).or_default() += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut identical_groups = Vec::new();
|
||||||
|
let mut diverged_groups = Vec::new();
|
||||||
|
|
||||||
|
for (key, mut nodes) in duplicates {
|
||||||
|
// Sort by version descending so highest-version is first
|
||||||
|
nodes.sort_by(|a, b| b.version.cmp(&a.version));
|
||||||
|
|
||||||
|
// Check if all copies have identical content
|
||||||
|
let all_same = nodes.windows(2).all(|w| w[0].content == w[1].content);
|
||||||
|
|
||||||
|
let info: Vec<_> = nodes.iter().map(|n| {
|
||||||
|
let edge_count = edges_by_uuid.get(&n.uuid).copied().unwrap_or(0);
|
||||||
|
(n.clone(), edge_count)
|
||||||
|
}).collect();
|
||||||
|
|
||||||
|
if all_same {
|
||||||
|
identical_groups.push((key, info));
|
||||||
|
} else {
|
||||||
|
diverged_groups.push((key, info));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Report
|
||||||
|
println!("=== Duplicate key report ===\n");
|
||||||
|
println!("{} identical groups, {} diverged groups\n",
|
||||||
|
identical_groups.len(), diverged_groups.len());
|
||||||
|
|
||||||
|
if !identical_groups.is_empty() {
|
||||||
|
println!("── Identical (safe to auto-merge) ──");
|
||||||
|
for (key, copies) in &identical_groups {
|
||||||
|
let total_edges: usize = copies.iter().map(|c| c.1).sum();
|
||||||
|
println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
|
||||||
|
for (node, edges) in copies {
|
||||||
|
let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
|
||||||
|
println!(" v{} uuid={}.. edges={}", node.version, &uuid_hex[..8], edges);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
|
||||||
|
if !diverged_groups.is_empty() {
|
||||||
|
println!("── Diverged (need review) ──");
|
||||||
|
for (key, copies) in &diverged_groups {
|
||||||
|
let total_edges: usize = copies.iter().map(|c| c.1).sum();
|
||||||
|
println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
|
||||||
|
for (node, edges) in copies {
|
||||||
|
let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
|
||||||
|
let preview: String = node.content.chars().take(80).collect();
|
||||||
|
println!(" v{} uuid={}.. edges={} | {}{}",
|
||||||
|
node.version, &uuid_hex[..8], edges, preview,
|
||||||
|
if node.content.len() > 80 { "..." } else { "" });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
}
|
||||||
|
|
||||||
|
if !apply {
|
||||||
|
let total_dupes: usize = identical_groups.iter().chain(diverged_groups.iter())
|
||||||
|
.map(|(_, copies)| copies.len() - 1)
|
||||||
|
.sum();
|
||||||
|
println!("Dry run: {} duplicate nodes would be merged. Use --apply to execute.", total_dupes);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge all groups: identical + diverged
|
||||||
|
// For diverged: keep the copy with most edges (it's the one that got
|
||||||
|
// woven into the graph — the version that lived). Fall back to highest version.
|
||||||
|
let all_groups: Vec<_> = identical_groups.into_iter()
|
||||||
|
.chain(diverged_groups.into_iter())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut merged = 0usize;
|
||||||
|
let mut edges_redirected = 0usize;
|
||||||
|
let mut edges_deduped = 0usize;
|
||||||
|
|
||||||
|
for (_key, mut copies) in all_groups {
|
||||||
|
// Pick survivor: most edges first, then highest version
|
||||||
|
copies.sort_by(|a, b| b.1.cmp(&a.1).then(b.0.version.cmp(&a.0.version)));
|
||||||
|
|
||||||
|
let survivor_uuid = copies[0].0.uuid;
|
||||||
|
let doomed_uuids: Vec<[u8; 16]> = copies[1..].iter().map(|c| c.0.uuid).collect();
|
||||||
|
|
||||||
|
// Redirect edges from doomed UUIDs to survivor
|
||||||
|
let mut updated_rels = Vec::new();
|
||||||
|
for rel in &mut store.relations {
|
||||||
|
if rel.deleted { continue; }
|
||||||
|
let mut changed = false;
|
||||||
|
if doomed_uuids.contains(&rel.source) {
|
||||||
|
rel.source = survivor_uuid;
|
||||||
|
changed = true;
|
||||||
|
}
|
||||||
|
if doomed_uuids.contains(&rel.target) {
|
||||||
|
rel.target = survivor_uuid;
|
||||||
|
changed = true;
|
||||||
|
}
|
||||||
|
if changed {
|
||||||
|
rel.version += 1;
|
||||||
|
updated_rels.push(rel.clone());
|
||||||
|
edges_redirected += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dedup edges: same (source, target, rel_type) → keep highest strength
|
||||||
|
let mut seen: HashSet<([u8; 16], [u8; 16], String)> = HashSet::new();
|
||||||
|
let mut to_tombstone_rels = Vec::new();
|
||||||
|
// Sort by strength descending so we keep the strongest
|
||||||
|
let mut rels_with_idx: Vec<(usize, &store::Relation)> = store.relations.iter()
|
||||||
|
.enumerate()
|
||||||
|
.filter(|(_, r)| !r.deleted && (r.source == survivor_uuid || r.target == survivor_uuid))
|
||||||
|
.collect();
|
||||||
|
rels_with_idx.sort_by(|a, b| b.1.strength.total_cmp(&a.1.strength));
|
||||||
|
|
||||||
|
for (idx, rel) in &rels_with_idx {
|
||||||
|
let edge_key = (rel.source, rel.target, format!("{:?}", rel.rel_type));
|
||||||
|
if !seen.insert(edge_key) {
|
||||||
|
to_tombstone_rels.push(*idx);
|
||||||
|
edges_deduped += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for &idx in &to_tombstone_rels {
|
||||||
|
store.relations[idx].deleted = true;
|
||||||
|
store.relations[idx].version += 1;
|
||||||
|
updated_rels.push(store.relations[idx].clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tombstone doomed nodes
|
||||||
|
let mut tombstones = Vec::new();
|
||||||
|
for (doomed_node, _) in &copies[1..] {
|
||||||
|
let mut t = doomed_node.clone();
|
||||||
|
t.deleted = true;
|
||||||
|
t.version += 1;
|
||||||
|
tombstones.push(t);
|
||||||
|
}
|
||||||
|
|
||||||
|
store.append_nodes(&tombstones)?;
|
||||||
|
if !updated_rels.is_empty() {
|
||||||
|
store.append_relations(&updated_rels)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
for uuid in &doomed_uuids {
|
||||||
|
store.uuid_to_key.remove(uuid);
|
||||||
|
}
|
||||||
|
|
||||||
|
merged += doomed_uuids.len();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove tombstoned relations from cache
|
||||||
|
store.relations.retain(|r| !r.deleted);
|
||||||
|
store.save()?;
|
||||||
|
|
||||||
|
println!("Merged {} duplicates, redirected {} edges, deduped {} duplicate edges",
|
||||||
|
merged, edges_redirected, edges_deduped);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_health() -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
let g = store.build_graph();
|
||||||
|
let report = crate::graph::health_report(&g, &store);
|
||||||
|
print!("{}", report);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_daily_check() -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
let report = crate::neuro::daily_check(&store);
|
||||||
|
print!("{}", report);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_import(files: &[String]) -> Result<(), String> {
|
||||||
|
if files.is_empty() {
|
||||||
|
return Err("import requires at least one file path".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let mut total_new = 0;
|
||||||
|
let mut total_updated = 0;
|
||||||
|
|
||||||
|
for arg in files {
|
||||||
|
let path = std::path::PathBuf::from(arg);
|
||||||
|
let resolved = if path.exists() {
|
||||||
|
path
|
||||||
|
} else {
|
||||||
|
let mem_path = store::memory_dir().join(arg);
|
||||||
|
if !mem_path.exists() {
|
||||||
|
eprintln!("File not found: {}", arg);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
mem_path
|
||||||
|
};
|
||||||
|
let (n, u) = store.import_file(&resolved)?;
|
||||||
|
total_new += n;
|
||||||
|
total_updated += u;
|
||||||
|
}
|
||||||
|
|
||||||
|
if total_new > 0 || total_updated > 0 {
|
||||||
|
store.save()?;
|
||||||
|
}
|
||||||
|
println!("Import: {} new, {} updated", total_new, total_updated);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_export(files: &[String], export_all: bool) -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
|
||||||
|
let targets: Vec<String> = if export_all {
|
||||||
|
let mut files: Vec<String> = store.nodes.keys()
|
||||||
|
.filter(|k| !k.contains('#'))
|
||||||
|
.cloned()
|
||||||
|
.collect();
|
||||||
|
files.sort();
|
||||||
|
files
|
||||||
|
} else if files.is_empty() {
|
||||||
|
return Err("export requires file keys or --all".into());
|
||||||
|
} else {
|
||||||
|
files.iter().map(|a| {
|
||||||
|
a.strip_suffix(".md").unwrap_or(a).to_string()
|
||||||
|
}).collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
let mem_dir = store::memory_dir();
|
||||||
|
|
||||||
|
for file_key in &targets {
|
||||||
|
match store.export_to_markdown(file_key) {
|
||||||
|
Some(content) => {
|
||||||
|
let out_path = mem_dir.join(format!("{}.md", file_key));
|
||||||
|
std::fs::write(&out_path, &content)
|
||||||
|
.map_err(|e| format!("write {}: {}", out_path.display(), e))?;
|
||||||
|
let section_count = content.matches("<!-- mem:").count() + 1;
|
||||||
|
println!("Exported {} ({} sections)", file_key, section_count);
|
||||||
|
}
|
||||||
|
None => eprintln!("No nodes for '{}'", file_key),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
142
poc-memory/src/cli/agent.rs
Normal file
142
poc-memory/src/cli/agent.rs
Normal file
|
|
@ -0,0 +1,142 @@
|
||||||
|
// cli/agent.rs — agent subcommand handlers
|
||||||
|
|
||||||
|
use crate::store;
|
||||||
|
use crate::store::StoreView;
|
||||||
|
|
||||||
|
pub fn cmd_consolidate_batch(count: usize, auto: bool, agent: Option<String>) -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
|
||||||
|
if let Some(agent_name) = agent {
|
||||||
|
let batch = crate::agents::prompts::agent_prompt(&store, &agent_name, count)?;
|
||||||
|
println!("{}", batch.prompt);
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
crate::agents::prompts::consolidation_batch(&store, count, auto)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_replay_queue(count: usize) -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
let queue = crate::neuro::replay_queue(&store, count);
|
||||||
|
println!("Replay queue ({} items):", queue.len());
|
||||||
|
for (i, item) in queue.iter().enumerate() {
|
||||||
|
println!(" {:2}. [{:.3}] {:>10} {} (interval={}d, emotion={:.1}, spectral={:.1})",
|
||||||
|
i + 1, item.priority, item.classification, item.key,
|
||||||
|
item.interval_days, item.emotion, item.outlier_score);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_consolidate_session() -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
let plan = crate::neuro::consolidation_plan(&store);
|
||||||
|
println!("{}", crate::neuro::format_plan(&plan));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_consolidate_full() -> Result<(), String> {
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
crate::consolidate::consolidate_full(&mut store)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_digest_links(do_apply: bool) -> Result<(), String> {
|
||||||
|
let store = store::Store::load()?;
|
||||||
|
let links = crate::digest::parse_all_digest_links(&store);
|
||||||
|
drop(store);
|
||||||
|
println!("Found {} unique links from digest nodes", links.len());
|
||||||
|
|
||||||
|
if !do_apply {
|
||||||
|
for (i, link) in links.iter().enumerate() {
|
||||||
|
println!(" {:3}. {} → {}", i + 1, link.source, link.target);
|
||||||
|
if !link.reason.is_empty() {
|
||||||
|
println!(" ({})", &link.reason[..link.reason.len().min(80)]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("\nTo apply: poc-memory digest-links --apply");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let (applied, skipped, fallbacks) = crate::digest::apply_digest_links(&mut store, &links);
|
||||||
|
println!("\nApplied: {} ({} file-level fallbacks) Skipped: {}", applied, fallbacks, skipped);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_journal_enrich(jsonl_path: &str, entry_text: &str, grep_line: usize) -> Result<(), String> {
|
||||||
|
if !std::path::Path::new(jsonl_path).is_file() {
|
||||||
|
return Err(format!("JSONL not found: {}", jsonl_path));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
crate::enrich::journal_enrich(&mut store, jsonl_path, entry_text, grep_line)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_apply_consolidation(do_apply: bool, report_file: Option<&str>) -> Result<(), String> {
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
crate::consolidate::apply_consolidation(&mut store, do_apply, report_file)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_knowledge_loop(max_cycles: usize, batch_size: usize, window: usize, max_depth: i32) -> Result<(), String> {
|
||||||
|
let config = crate::knowledge::KnowledgeLoopConfig {
|
||||||
|
max_cycles,
|
||||||
|
batch_size,
|
||||||
|
window,
|
||||||
|
max_depth,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let results = crate::knowledge::run_knowledge_loop(&config)?;
|
||||||
|
eprintln!("\nCompleted {} cycles, {} total actions applied",
|
||||||
|
results.len(),
|
||||||
|
results.iter().map(|r| r.total_applied).sum::<usize>());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_fact_mine(path: &str, batch: bool, dry_run: bool, output_file: Option<&str>, min_messages: usize) -> Result<(), String> {
|
||||||
|
let p = std::path::Path::new(path);
|
||||||
|
|
||||||
|
let paths: Vec<std::path::PathBuf> = if batch {
|
||||||
|
if !p.is_dir() {
|
||||||
|
return Err(format!("Not a directory: {}", path));
|
||||||
|
}
|
||||||
|
let mut files: Vec<_> = std::fs::read_dir(p)
|
||||||
|
.map_err(|e| format!("read dir: {}", e))?
|
||||||
|
.filter_map(|e| e.ok())
|
||||||
|
.map(|e| e.path())
|
||||||
|
.filter(|p| p.extension().map(|x| x == "jsonl").unwrap_or(false))
|
||||||
|
.collect();
|
||||||
|
files.sort();
|
||||||
|
eprintln!("Found {} transcripts", files.len());
|
||||||
|
files
|
||||||
|
} else {
|
||||||
|
vec![p.to_path_buf()]
|
||||||
|
};
|
||||||
|
|
||||||
|
let path_refs: Vec<&std::path::Path> = paths.iter().map(|p| p.as_path()).collect();
|
||||||
|
let facts = crate::fact_mine::mine_batch(&path_refs, min_messages, dry_run)?;
|
||||||
|
|
||||||
|
if !dry_run {
|
||||||
|
let json = serde_json::to_string_pretty(&facts)
|
||||||
|
.map_err(|e| format!("serialize: {}", e))?;
|
||||||
|
if let Some(out) = output_file {
|
||||||
|
std::fs::write(out, &json).map_err(|e| format!("write: {}", e))?;
|
||||||
|
eprintln!("\nWrote {} facts to {}", facts.len(), out);
|
||||||
|
} else {
|
||||||
|
println!("{}", json);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
eprintln!("\nTotal: {} facts from {} transcripts", facts.len(), paths.len());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cmd_fact_mine_store(path: &str) -> Result<(), String> {
|
||||||
|
let path = std::path::Path::new(path);
|
||||||
|
if !path.exists() {
|
||||||
|
return Err(format!("File not found: {}", path.display()));
|
||||||
|
}
|
||||||
|
let count = crate::fact_mine::mine_and_store(path, None)?;
|
||||||
|
eprintln!("Stored {} facts", count);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@ -5,3 +5,5 @@
|
||||||
|
|
||||||
pub mod graph;
|
pub mod graph;
|
||||||
pub mod node;
|
pub mod node;
|
||||||
|
pub mod agent;
|
||||||
|
pub mod admin;
|
||||||
|
|
|
||||||
|
|
@ -758,7 +758,7 @@ fn main() {
|
||||||
JournalCmd::Write { text } => cmd_journal_write(&text),
|
JournalCmd::Write { text } => cmd_journal_write(&text),
|
||||||
JournalCmd::Tail { n, full, level } => cmd_journal_tail(n, full, level),
|
JournalCmd::Tail { n, full, level } => cmd_journal_tail(n, full, level),
|
||||||
JournalCmd::Enrich { jsonl_path, entry_text, grep_line }
|
JournalCmd::Enrich { jsonl_path, entry_text, grep_line }
|
||||||
=> cmd_journal_enrich(&jsonl_path, &entry_text, grep_line),
|
=> cli::agent::cmd_journal_enrich(&jsonl_path, &entry_text, grep_line),
|
||||||
},
|
},
|
||||||
|
|
||||||
// Graph
|
// Graph
|
||||||
|
|
@ -797,33 +797,33 @@ fn main() {
|
||||||
Command::Agent(sub) => match sub {
|
Command::Agent(sub) => match sub {
|
||||||
AgentCmd::Daemon(sub) => cmd_daemon(sub),
|
AgentCmd::Daemon(sub) => cmd_daemon(sub),
|
||||||
AgentCmd::KnowledgeLoop { max_cycles, batch_size, window, max_depth }
|
AgentCmd::KnowledgeLoop { max_cycles, batch_size, window, max_depth }
|
||||||
=> cmd_knowledge_loop(max_cycles, batch_size, window, max_depth),
|
=> cli::agent::cmd_knowledge_loop(max_cycles, batch_size, window, max_depth),
|
||||||
AgentCmd::ConsolidateBatch { count, auto, agent }
|
AgentCmd::ConsolidateBatch { count, auto, agent }
|
||||||
=> cmd_consolidate_batch(count, auto, agent),
|
=> cli::agent::cmd_consolidate_batch(count, auto, agent),
|
||||||
AgentCmd::ConsolidateSession => cmd_consolidate_session(),
|
AgentCmd::ConsolidateSession => cli::agent::cmd_consolidate_session(),
|
||||||
AgentCmd::ConsolidateFull => cmd_consolidate_full(),
|
AgentCmd::ConsolidateFull => cli::agent::cmd_consolidate_full(),
|
||||||
AgentCmd::ApplyAgent { all } => cmd_apply_agent(all),
|
AgentCmd::ApplyAgent { all } => cmd_apply_agent(all),
|
||||||
AgentCmd::ApplyConsolidation { apply, report }
|
AgentCmd::ApplyConsolidation { apply, report }
|
||||||
=> cmd_apply_consolidation(apply, report.as_deref()),
|
=> cli::agent::cmd_apply_consolidation(apply, report.as_deref()),
|
||||||
AgentCmd::Digest { level } => cmd_digest(level),
|
AgentCmd::Digest { level } => cmd_digest(level),
|
||||||
AgentCmd::DigestLinks { apply } => cmd_digest_links(apply),
|
AgentCmd::DigestLinks { apply } => cli::agent::cmd_digest_links(apply),
|
||||||
AgentCmd::ExperienceMine { jsonl_path } => cmd_experience_mine(jsonl_path),
|
AgentCmd::ExperienceMine { jsonl_path } => cmd_experience_mine(jsonl_path),
|
||||||
AgentCmd::FactMine { path, batch, dry_run, output, min_messages }
|
AgentCmd::FactMine { path, batch, dry_run, output, min_messages }
|
||||||
=> cmd_fact_mine(&path, batch, dry_run, output.as_deref(), min_messages),
|
=> cli::agent::cmd_fact_mine(&path, batch, dry_run, output.as_deref(), min_messages),
|
||||||
AgentCmd::FactMineStore { path } => cmd_fact_mine_store(&path),
|
AgentCmd::FactMineStore { path } => cli::agent::cmd_fact_mine_store(&path),
|
||||||
AgentCmd::ReplayQueue { count } => cmd_replay_queue(count),
|
AgentCmd::ReplayQueue { count } => cli::agent::cmd_replay_queue(count),
|
||||||
},
|
},
|
||||||
|
|
||||||
// Admin
|
// Admin
|
||||||
Command::Admin(sub) => match sub {
|
Command::Admin(sub) => match sub {
|
||||||
AdminCmd::Init => cmd_init(),
|
AdminCmd::Init => cli::admin::cmd_init(),
|
||||||
AdminCmd::Health => cmd_health(),
|
AdminCmd::Health => cli::admin::cmd_health(),
|
||||||
AdminCmd::Fsck => cmd_fsck(),
|
AdminCmd::Fsck => cli::admin::cmd_fsck(),
|
||||||
AdminCmd::Dedup { apply } => cmd_dedup(apply),
|
AdminCmd::Dedup { apply } => cli::admin::cmd_dedup(apply),
|
||||||
AdminCmd::BulkRename { from, to, apply } => cmd_bulk_rename(&from, &to, apply),
|
AdminCmd::BulkRename { from, to, apply } => cli::admin::cmd_bulk_rename(&from, &to, apply),
|
||||||
AdminCmd::DailyCheck => cmd_daily_check(),
|
AdminCmd::DailyCheck => cli::admin::cmd_daily_check(),
|
||||||
AdminCmd::Import { files } => cmd_import(&files),
|
AdminCmd::Import { files } => cli::admin::cmd_import(&files),
|
||||||
AdminCmd::Export { files, all } => cmd_export(&files, all),
|
AdminCmd::Export { files, all } => cli::admin::cmd_export(&files, all),
|
||||||
AdminCmd::LoadContext { stats } => cmd_load_context(stats),
|
AdminCmd::LoadContext { stats } => cmd_load_context(stats),
|
||||||
AdminCmd::Log => cmd_log(),
|
AdminCmd::Log => cmd_log(),
|
||||||
AdminCmd::Params => cmd_params(),
|
AdminCmd::Params => cmd_params(),
|
||||||
|
|
@ -974,125 +974,6 @@ fn cmd_search(terms: &[String], pipeline_args: &[String], expand: bool, full: bo
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_init() -> Result<(), String> {
|
|
||||||
let cfg = config::get();
|
|
||||||
|
|
||||||
// Ensure data directory exists
|
|
||||||
std::fs::create_dir_all(&cfg.data_dir)
|
|
||||||
.map_err(|e| format!("create data_dir: {}", e))?;
|
|
||||||
|
|
||||||
// Install filesystem files (not store nodes)
|
|
||||||
install_default_file(&cfg.data_dir, "instructions.md",
|
|
||||||
include_str!("../defaults/instructions.md"))?;
|
|
||||||
install_default_file(&cfg.data_dir, "on-consciousness.md",
|
|
||||||
include_str!("../defaults/on-consciousness.md"))?;
|
|
||||||
|
|
||||||
// Initialize store and seed default identity node if empty
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let count = store.init_from_markdown()?;
|
|
||||||
for key in &cfg.core_nodes {
|
|
||||||
if !store.nodes.contains_key(key) && key == "identity" {
|
|
||||||
let default = include_str!("../defaults/identity.md");
|
|
||||||
store.upsert(key, default)
|
|
||||||
.map_err(|e| format!("seed {}: {}", key, e))?;
|
|
||||||
println!("Seeded {} in store", key);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
store.save()?;
|
|
||||||
println!("Indexed {} memory units", count);
|
|
||||||
|
|
||||||
// Install hooks
|
|
||||||
daemon::install_hook()?;
|
|
||||||
|
|
||||||
// Create config if none exists
|
|
||||||
let config_path = std::env::var("POC_MEMORY_CONFIG")
|
|
||||||
.map(std::path::PathBuf::from)
|
|
||||||
.unwrap_or_else(|_| {
|
|
||||||
std::path::PathBuf::from(std::env::var("HOME").unwrap())
|
|
||||||
.join(".config/poc-memory/config.jsonl")
|
|
||||||
});
|
|
||||||
if !config_path.exists() {
|
|
||||||
let config_dir = config_path.parent().unwrap();
|
|
||||||
std::fs::create_dir_all(config_dir)
|
|
||||||
.map_err(|e| format!("create config dir: {}", e))?;
|
|
||||||
let example = include_str!("../config.example.jsonl");
|
|
||||||
std::fs::write(&config_path, example)
|
|
||||||
.map_err(|e| format!("write config: {}", e))?;
|
|
||||||
println!("Created config at {} — edit with your name and context groups",
|
|
||||||
config_path.display());
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("Done. Run `poc-memory load-context --stats` to verify.");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_bulk_rename(from: &str, to: &str, apply: bool) -> Result<(), String> {
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
|
|
||||||
// Find all keys that need renaming
|
|
||||||
let renames: Vec<(String, String)> = store.nodes.keys()
|
|
||||||
.filter(|k| k.contains(from))
|
|
||||||
.map(|k| (k.clone(), k.replace(from, to)))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// Check for collisions
|
|
||||||
let existing: std::collections::HashSet<&String> = store.nodes.keys().collect();
|
|
||||||
let mut collisions = 0;
|
|
||||||
for (old, new) in &renames {
|
|
||||||
if existing.contains(new) && old != new {
|
|
||||||
eprintln!("COLLISION: {} -> {} (target exists)", old, new);
|
|
||||||
collisions += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("Bulk rename '{}' -> '{}'", from, to);
|
|
||||||
println!(" Keys to rename: {}", renames.len());
|
|
||||||
println!(" Collisions: {}", collisions);
|
|
||||||
|
|
||||||
if collisions > 0 {
|
|
||||||
return Err(format!("{} collisions — aborting", collisions));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !apply {
|
|
||||||
// Show a sample
|
|
||||||
for (old, new) in renames.iter().take(10) {
|
|
||||||
println!(" {} -> {}", old, new);
|
|
||||||
}
|
|
||||||
if renames.len() > 10 {
|
|
||||||
println!(" ... and {} more", renames.len() - 10);
|
|
||||||
}
|
|
||||||
println!("\nDry run. Use --apply to execute.");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply renames using rename_node() which properly appends to capnp logs.
|
|
||||||
// Process in batches to avoid holding the lock too long.
|
|
||||||
let mut renamed_count = 0;
|
|
||||||
let mut errors = 0;
|
|
||||||
let total = renames.len();
|
|
||||||
for (i, (old_key, new_key)) in renames.iter().enumerate() {
|
|
||||||
match store.rename_node(old_key, new_key) {
|
|
||||||
Ok(()) => renamed_count += 1,
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!(" RENAME ERROR: {} -> {}: {}", old_key, new_key, e);
|
|
||||||
errors += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (i + 1) % 1000 == 0 {
|
|
||||||
println!(" {}/{} ({} errors)", i + 1, total, errors);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
store.save()?;
|
|
||||||
println!("Renamed {} nodes ({} errors).", renamed_count, errors);
|
|
||||||
|
|
||||||
// Run fsck to verify
|
|
||||||
println!("\nRunning fsck...");
|
|
||||||
drop(store);
|
|
||||||
cmd_fsck()?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -> Result<(), String> {
|
fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -> Result<(), String> {
|
||||||
let path = data_dir.join(name);
|
let path = data_dir.join(name);
|
||||||
if !path.exists() {
|
if !path.exists() {
|
||||||
|
|
@ -1103,282 +984,6 @@ fn install_default_file(data_dir: &std::path::Path, name: &str, content: &str) -
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// `fsck` subcommand: check cache-vs-log consistency, node-key invariants,
/// and edge endpoints. Rebuilds the cache from the logs and tombstones
/// orphan edges when problems are found.
fn cmd_fsck() -> Result<(), String> {
    let mut store = store::Store::load()?;

    // Replay the capnp logs and diff them against the cached view.
    let log_store = store::Store::load_from_logs()?;
    let mut cache_issues = 0;

    // In the logs, missing from the cache.
    for key in log_store.nodes.keys() {
        if !store.nodes.contains_key(key) {
            eprintln!("CACHE MISSING: '{}' exists in capnp log but not in cache", key);
            cache_issues += 1;
        }
    }
    // In the cache, missing from the logs (phantoms).
    for key in store.nodes.keys() {
        if !log_store.nodes.contains_key(key) {
            eprintln!("CACHE PHANTOM: '{}' exists in cache but not in capnp log", key);
            cache_issues += 1;
        }
    }
    // Present on both sides but at different versions.
    for (key, log_node) in &log_store.nodes {
        if let Some(cache_node) = store.nodes.get(key) {
            if cache_node.version != log_node.version {
                eprintln!("CACHE STALE: '{}' cache v{} vs log v{}",
                    key, cache_node.version, log_node.version);
                cache_issues += 1;
            }
        }
    }

    if cache_issues > 0 {
        // Logs are the source of truth: adopt them and rewrite the cache.
        eprintln!("{} cache inconsistencies found — rebuilding from logs", cache_issues);
        store = log_store;
        store.save().map_err(|e| format!("rebuild save: {}", e))?;
    }

    // Each map key must equal the key recorded inside its node.
    let mut issues = 0;
    for (key, node) in &store.nodes {
        if key != &node.key {
            eprintln!("MISMATCH: map key '{}' vs node.key '{}'", key, node.key);
            issues += 1;
        }
    }

    // Live edges must reference existing nodes on both ends.
    let mut dangling = 0;
    for rel in &store.relations {
        if rel.deleted { continue; }
        if !store.nodes.contains_key(&rel.source_key) {
            eprintln!("DANGLING: edge source '{}'", rel.source_key);
            dangling += 1;
        }
        if !store.nodes.contains_key(&rel.target_key) {
            eprintln!("DANGLING: edge target '{}'", rel.target_key);
            dangling += 1;
        }
    }

    // Tombstone every edge with at least one missing endpoint.
    let mut to_tombstone = Vec::new();
    for rel in &store.relations {
        if rel.deleted { continue; }
        if !store.nodes.contains_key(&rel.source_key)
            || !store.nodes.contains_key(&rel.target_key) {
            let mut tombstone = rel.clone();
            tombstone.deleted = true;
            tombstone.version += 1;
            to_tombstone.push(tombstone);
        }
    }
    if !to_tombstone.is_empty() {
        let count = to_tombstone.len();
        // Persist the tombstones, then mirror them into the cached relations.
        store.append_relations(&to_tombstone)?;
        for t in &to_tombstone {
            if let Some(r) = store.relations.iter_mut().find(|r| r.uuid == t.uuid) {
                r.deleted = true;
                r.version = t.version;
            }
        }
        store.save()?;
        eprintln!("Pruned {} orphan edges", count);
    }

    let g = store.build_graph();
    println!("fsck: {} nodes, {} edges, {} issues, {} dangling, {} cache",
        store.nodes.len(), g.edge_count(), issues, dangling, cache_issues);
    Ok(())
}
|
|
||||||
|
|
||||||
fn cmd_dedup(apply: bool) -> Result<(), String> {
|
|
||||||
use std::collections::{HashMap, HashSet};
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let duplicates = store.find_duplicates()?;
|
|
||||||
|
|
||||||
if duplicates.is_empty() {
|
|
||||||
println!("No duplicate keys found.");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count edges per UUID
|
|
||||||
let mut edges_by_uuid: HashMap<[u8; 16], usize> = HashMap::new();
|
|
||||||
for rel in &store.relations {
|
|
||||||
if rel.deleted { continue; }
|
|
||||||
*edges_by_uuid.entry(rel.source).or_default() += 1;
|
|
||||||
*edges_by_uuid.entry(rel.target).or_default() += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut identical_groups = Vec::new();
|
|
||||||
let mut diverged_groups = Vec::new();
|
|
||||||
|
|
||||||
for (key, mut nodes) in duplicates {
|
|
||||||
// Sort by version descending so highest-version is first
|
|
||||||
nodes.sort_by(|a, b| b.version.cmp(&a.version));
|
|
||||||
|
|
||||||
// Check if all copies have identical content
|
|
||||||
let all_same = nodes.windows(2).all(|w| w[0].content == w[1].content);
|
|
||||||
|
|
||||||
let info: Vec<_> = nodes.iter().map(|n| {
|
|
||||||
let edge_count = edges_by_uuid.get(&n.uuid).copied().unwrap_or(0);
|
|
||||||
(n.clone(), edge_count)
|
|
||||||
}).collect();
|
|
||||||
|
|
||||||
if all_same {
|
|
||||||
identical_groups.push((key, info));
|
|
||||||
} else {
|
|
||||||
diverged_groups.push((key, info));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Report
|
|
||||||
println!("=== Duplicate key report ===\n");
|
|
||||||
println!("{} identical groups, {} diverged groups\n",
|
|
||||||
identical_groups.len(), diverged_groups.len());
|
|
||||||
|
|
||||||
if !identical_groups.is_empty() {
|
|
||||||
println!("── Identical (safe to auto-merge) ──");
|
|
||||||
for (key, copies) in &identical_groups {
|
|
||||||
let total_edges: usize = copies.iter().map(|c| c.1).sum();
|
|
||||||
println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
|
|
||||||
for (node, edges) in copies {
|
|
||||||
let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
|
|
||||||
println!(" v{} uuid={}.. edges={}", node.version, &uuid_hex[..8], edges);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
println!();
|
|
||||||
}
|
|
||||||
|
|
||||||
if !diverged_groups.is_empty() {
|
|
||||||
println!("── Diverged (need review) ──");
|
|
||||||
for (key, copies) in &diverged_groups {
|
|
||||||
let total_edges: usize = copies.iter().map(|c| c.1).sum();
|
|
||||||
println!(" {} ({} copies, {} total edges)", key, copies.len(), total_edges);
|
|
||||||
for (node, edges) in copies {
|
|
||||||
let uuid_hex = node.uuid.iter().map(|b| format!("{:02x}", b)).collect::<String>();
|
|
||||||
let preview: String = node.content.chars().take(80).collect();
|
|
||||||
println!(" v{} uuid={}.. edges={} | {}{}",
|
|
||||||
node.version, &uuid_hex[..8], edges, preview,
|
|
||||||
if node.content.len() > 80 { "..." } else { "" });
|
|
||||||
}
|
|
||||||
}
|
|
||||||
println!();
|
|
||||||
}
|
|
||||||
|
|
||||||
if !apply {
|
|
||||||
let total_dupes: usize = identical_groups.iter().chain(diverged_groups.iter())
|
|
||||||
.map(|(_, copies)| copies.len() - 1)
|
|
||||||
.sum();
|
|
||||||
println!("Dry run: {} duplicate nodes would be merged. Use --apply to execute.", total_dupes);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge all groups: identical + diverged
|
|
||||||
// For diverged: keep the copy with most edges (it's the one that got
|
|
||||||
// woven into the graph — the version that lived). Fall back to highest version.
|
|
||||||
let all_groups: Vec<_> = identical_groups.into_iter()
|
|
||||||
.chain(diverged_groups.into_iter())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut merged = 0usize;
|
|
||||||
let mut edges_redirected = 0usize;
|
|
||||||
let mut edges_deduped = 0usize;
|
|
||||||
|
|
||||||
for (_key, mut copies) in all_groups {
|
|
||||||
// Pick survivor: most edges first, then highest version
|
|
||||||
copies.sort_by(|a, b| b.1.cmp(&a.1).then(b.0.version.cmp(&a.0.version)));
|
|
||||||
|
|
||||||
let survivor_uuid = copies[0].0.uuid;
|
|
||||||
let doomed_uuids: Vec<[u8; 16]> = copies[1..].iter().map(|c| c.0.uuid).collect();
|
|
||||||
|
|
||||||
// Redirect edges from doomed UUIDs to survivor
|
|
||||||
let mut updated_rels = Vec::new();
|
|
||||||
for rel in &mut store.relations {
|
|
||||||
if rel.deleted { continue; }
|
|
||||||
let mut changed = false;
|
|
||||||
if doomed_uuids.contains(&rel.source) {
|
|
||||||
rel.source = survivor_uuid;
|
|
||||||
changed = true;
|
|
||||||
}
|
|
||||||
if doomed_uuids.contains(&rel.target) {
|
|
||||||
rel.target = survivor_uuid;
|
|
||||||
changed = true;
|
|
||||||
}
|
|
||||||
if changed {
|
|
||||||
rel.version += 1;
|
|
||||||
updated_rels.push(rel.clone());
|
|
||||||
edges_redirected += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dedup edges: same (source, target, rel_type) → keep highest strength
|
|
||||||
let mut seen: HashSet<([u8; 16], [u8; 16], String)> = HashSet::new();
|
|
||||||
let mut to_tombstone_rels = Vec::new();
|
|
||||||
// Sort by strength descending so we keep the strongest
|
|
||||||
let mut rels_with_idx: Vec<(usize, &store::Relation)> = store.relations.iter()
|
|
||||||
.enumerate()
|
|
||||||
.filter(|(_, r)| !r.deleted && (r.source == survivor_uuid || r.target == survivor_uuid))
|
|
||||||
.collect();
|
|
||||||
rels_with_idx.sort_by(|a, b| b.1.strength.total_cmp(&a.1.strength));
|
|
||||||
|
|
||||||
for (idx, rel) in &rels_with_idx {
|
|
||||||
let edge_key = (rel.source, rel.target, format!("{:?}", rel.rel_type));
|
|
||||||
if !seen.insert(edge_key) {
|
|
||||||
to_tombstone_rels.push(*idx);
|
|
||||||
edges_deduped += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for &idx in &to_tombstone_rels {
|
|
||||||
store.relations[idx].deleted = true;
|
|
||||||
store.relations[idx].version += 1;
|
|
||||||
updated_rels.push(store.relations[idx].clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tombstone doomed nodes
|
|
||||||
let mut tombstones = Vec::new();
|
|
||||||
for (doomed_node, _) in &copies[1..] {
|
|
||||||
let mut t = doomed_node.clone();
|
|
||||||
t.deleted = true;
|
|
||||||
t.version += 1;
|
|
||||||
tombstones.push(t);
|
|
||||||
}
|
|
||||||
|
|
||||||
store.append_nodes(&tombstones)?;
|
|
||||||
if !updated_rels.is_empty() {
|
|
||||||
store.append_relations(&updated_rels)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
for uuid in &doomed_uuids {
|
|
||||||
store.uuid_to_key.remove(uuid);
|
|
||||||
}
|
|
||||||
|
|
||||||
merged += doomed_uuids.len();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove tombstoned relations from cache
|
|
||||||
store.relations.retain(|r| !r.deleted);
|
|
||||||
store.save()?;
|
|
||||||
|
|
||||||
println!("Merged {} duplicates, redirected {} edges, deduped {} duplicate edges",
|
|
||||||
merged, edges_redirected, edges_deduped);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_health() -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
let g = store.build_graph();
|
|
||||||
let report = graph::health_report(&g, &store);
|
|
||||||
print!("{}", report);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_status() -> Result<(), String> {
|
fn cmd_status() -> Result<(), String> {
|
||||||
// If stdout is a tty and daemon is running, launch TUI
|
// If stdout is a tty and daemon is running, launch TUI
|
||||||
if std::io::IsTerminal::is_terminal(&std::io::stdout()) {
|
if std::io::IsTerminal::is_terminal(&std::io::stdout()) {
|
||||||
|
|
@ -1416,18 +1021,6 @@ fn cmd_status() -> Result<(), String> {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_consolidate_batch(count: usize, auto: bool, agent: Option<String>) -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
|
|
||||||
if let Some(agent_name) = agent {
|
|
||||||
let batch = agents::prompts::agent_prompt(&store, &agent_name, count)?;
|
|
||||||
println!("{}", batch.prompt);
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
agents::prompts::consolidation_batch(&store, count, auto)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_log() -> Result<(), String> {
|
fn cmd_log() -> Result<(), String> {
|
||||||
let store = store::Store::load()?;
|
let store = store::Store::load()?;
|
||||||
for event in store.retrieval_log.iter().rev().take(20) {
|
for event in store.retrieval_log.iter().rev().take(20) {
|
||||||
|
|
@ -1451,37 +1044,6 @@ fn cmd_params() -> Result<(), String> {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_replay_queue(count: usize) -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
let queue = neuro::replay_queue(&store, count);
|
|
||||||
println!("Replay queue ({} items):", queue.len());
|
|
||||||
for (i, item) in queue.iter().enumerate() {
|
|
||||||
println!(" {:2}. [{:.3}] {:>10} {} (interval={}d, emotion={:.1}, spectral={:.1})",
|
|
||||||
i + 1, item.priority, item.classification, item.key,
|
|
||||||
item.interval_days, item.emotion, item.outlier_score);
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_consolidate_session() -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
let plan = neuro::consolidation_plan(&store);
|
|
||||||
println!("{}", neuro::format_plan(&plan));
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_consolidate_full() -> Result<(), String> {
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
consolidate::consolidate_full(&mut store)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_daily_check() -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
let report = neuro::daily_check(&store);
|
|
||||||
print!("{}", report);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply links from a single agent result JSON file.
|
/// Apply links from a single agent result JSON file.
|
||||||
/// Returns (links_applied, errors).
|
/// Returns (links_applied, errors).
|
||||||
fn apply_agent_file(
|
fn apply_agent_file(
|
||||||
|
|
@ -1563,140 +1125,6 @@ fn apply_agent_file(
|
||||||
(applied, errors)
|
(applied, errors)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_apply_agent(process_all: bool) -> Result<(), String> {
|
|
||||||
let results_dir = store::memory_dir().join("agent-results");
|
|
||||||
|
|
||||||
if !results_dir.exists() {
|
|
||||||
println!("No agent results directory");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let mut applied = 0;
|
|
||||||
let mut errors = 0;
|
|
||||||
|
|
||||||
let mut files: Vec<_> = std::fs::read_dir(&results_dir)
|
|
||||||
.map_err(|e| format!("read results dir: {}", e))?
|
|
||||||
.filter_map(|e| e.ok())
|
|
||||||
.filter(|e| e.path().extension().map(|x| x == "json").unwrap_or(false))
|
|
||||||
.collect();
|
|
||||||
files.sort_by_key(|e| e.path());
|
|
||||||
|
|
||||||
for entry in &files {
|
|
||||||
let path = entry.path();
|
|
||||||
let content = match std::fs::read_to_string(&path) {
|
|
||||||
Ok(c) => c,
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!(" Skip {}: {}", path.display(), e);
|
|
||||||
errors += 1;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let data: serde_json::Value = match serde_json::from_str(&content) {
|
|
||||||
Ok(d) => d,
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!(" Skip {}: parse error: {}", path.display(), e);
|
|
||||||
errors += 1;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
println!("Processing {}:", path.file_name().unwrap().to_string_lossy());
|
|
||||||
let (a, e) = apply_agent_file(&mut store, &data);
|
|
||||||
applied += a;
|
|
||||||
errors += e;
|
|
||||||
|
|
||||||
if !process_all {
|
|
||||||
let done_dir = util::memory_subdir("agent-results/done")?;
|
|
||||||
let dest = done_dir.join(path.file_name().unwrap());
|
|
||||||
std::fs::rename(&path, &dest).ok();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if applied > 0 {
|
|
||||||
store.save()?;
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("\nApplied {} links ({} errors, {} files processed)",
|
|
||||||
applied, errors, files.len());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_digest(level: DigestLevel) -> Result<(), String> {
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
|
|
||||||
match level {
|
|
||||||
DigestLevel::Auto => digest::digest_auto(&mut store),
|
|
||||||
DigestLevel::Daily { date } => {
|
|
||||||
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
|
||||||
digest::generate(&mut store, "daily", &arg)
|
|
||||||
}
|
|
||||||
DigestLevel::Weekly { date } => {
|
|
||||||
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
|
||||||
digest::generate(&mut store, "weekly", &arg)
|
|
||||||
}
|
|
||||||
DigestLevel::Monthly { date } => {
|
|
||||||
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
|
||||||
digest::generate(&mut store, "monthly", &arg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_digest_links(do_apply: bool) -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
let links = digest::parse_all_digest_links(&store);
|
|
||||||
drop(store);
|
|
||||||
println!("Found {} unique links from digest nodes", links.len());
|
|
||||||
|
|
||||||
if !do_apply {
|
|
||||||
for (i, link) in links.iter().enumerate() {
|
|
||||||
println!(" {:3}. {} → {}", i + 1, link.source, link.target);
|
|
||||||
if !link.reason.is_empty() {
|
|
||||||
println!(" ({})", &link.reason[..link.reason.len().min(80)]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
println!("\nTo apply: poc-memory digest-links --apply");
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let (applied, skipped, fallbacks) = digest::apply_digest_links(&mut store, &links);
|
|
||||||
println!("\nApplied: {} ({} file-level fallbacks) Skipped: {}", applied, fallbacks, skipped);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_journal_enrich(jsonl_path: &str, entry_text: &str, grep_line: usize) -> Result<(), String> {
|
|
||||||
if !std::path::Path::new(jsonl_path).is_file() {
|
|
||||||
return Err(format!("JSONL not found: {}", jsonl_path));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
enrich::journal_enrich(&mut store, jsonl_path, entry_text, grep_line)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_experience_mine(jsonl_path: Option<String>) -> Result<(), String> {
|
|
||||||
let jsonl_path = match jsonl_path {
|
|
||||||
Some(p) => p,
|
|
||||||
None => find_current_transcript()
|
|
||||||
.ok_or("no JSONL transcripts found")?,
|
|
||||||
};
|
|
||||||
|
|
||||||
if !std::path::Path::new(jsonl_path.as_str()).is_file() {
|
|
||||||
return Err(format!("JSONL not found: {}", jsonl_path));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let count = enrich::experience_mine(&mut store, &jsonl_path, None)?;
|
|
||||||
println!("Done: {} new entries mined.", count);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_apply_consolidation(do_apply: bool, report_file: Option<&str>) -> Result<(), String> {
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
consolidate::apply_consolidation(&mut store, do_apply, report_file)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_group_content(group: &config::ContextGroup, store: &store::Store, cfg: &config::Config) -> Vec<(String, String)> {
|
fn get_group_content(group: &config::ContextGroup, store: &store::Store, cfg: &config::Config) -> Vec<(String, String)> {
|
||||||
match group.source {
|
match group.source {
|
||||||
config::ContextSource::Journal => {
|
config::ContextSource::Journal => {
|
||||||
|
|
@ -1886,75 +1314,6 @@ fn cmd_tail(n: usize, full: bool) -> Result<(), String> {
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_import(files: &[String]) -> Result<(), String> {
|
|
||||||
if files.is_empty() {
|
|
||||||
return Err("import requires at least one file path".into());
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut store = store::Store::load()?;
|
|
||||||
let mut total_new = 0;
|
|
||||||
let mut total_updated = 0;
|
|
||||||
|
|
||||||
for arg in files {
|
|
||||||
let path = std::path::PathBuf::from(arg);
|
|
||||||
let resolved = if path.exists() {
|
|
||||||
path
|
|
||||||
} else {
|
|
||||||
let mem_path = store::memory_dir().join(arg);
|
|
||||||
if !mem_path.exists() {
|
|
||||||
eprintln!("File not found: {}", arg);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
mem_path
|
|
||||||
};
|
|
||||||
let (n, u) = store.import_file(&resolved)?;
|
|
||||||
total_new += n;
|
|
||||||
total_updated += u;
|
|
||||||
}
|
|
||||||
|
|
||||||
if total_new > 0 || total_updated > 0 {
|
|
||||||
store.save()?;
|
|
||||||
}
|
|
||||||
println!("Import: {} new, {} updated", total_new, total_updated);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_export(files: &[String], export_all: bool) -> Result<(), String> {
|
|
||||||
let store = store::Store::load()?;
|
|
||||||
|
|
||||||
let targets: Vec<String> = if export_all {
|
|
||||||
let mut files: Vec<String> = store.nodes.keys()
|
|
||||||
.filter(|k| !k.contains('#'))
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
files.sort();
|
|
||||||
files
|
|
||||||
} else if files.is_empty() {
|
|
||||||
return Err("export requires file keys or --all".into());
|
|
||||||
} else {
|
|
||||||
files.iter().map(|a| {
|
|
||||||
a.strip_suffix(".md").unwrap_or(a).to_string()
|
|
||||||
}).collect()
|
|
||||||
};
|
|
||||||
|
|
||||||
let mem_dir = store::memory_dir();
|
|
||||||
|
|
||||||
for file_key in &targets {
|
|
||||||
match store.export_to_markdown(file_key) {
|
|
||||||
Some(content) => {
|
|
||||||
let out_path = mem_dir.join(format!("{}.md", file_key));
|
|
||||||
std::fs::write(&out_path, &content)
|
|
||||||
.map_err(|e| format!("write {}: {}", out_path.display(), e))?;
|
|
||||||
let section_count = content.matches("<!-- mem:").count() + 1;
|
|
||||||
println!("Exported {} ({} sections)", file_key, section_count);
|
|
||||||
}
|
|
||||||
None => eprintln!("No nodes for '{}'", file_key),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_journal_write(text: &[String]) -> Result<(), String> {
|
fn cmd_journal_write(text: &[String]) -> Result<(), String> {
|
||||||
if text.is_empty() {
|
if text.is_empty() {
|
||||||
return Err("journal-write requires text".into());
|
return Err("journal-write requires text".into());
|
||||||
|
|
@ -2116,6 +1475,103 @@ fn cmd_query(expr: &[String]) -> Result<(), String> {
|
||||||
query::run_query(&store, &graph, &query_str)
|
query::run_query(&store, &graph, &query_str)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn cmd_apply_agent(process_all: bool) -> Result<(), String> {
|
||||||
|
let results_dir = store::memory_dir().join("agent-results");
|
||||||
|
|
||||||
|
if !results_dir.exists() {
|
||||||
|
println!("No agent results directory");
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let mut applied = 0;
|
||||||
|
let mut errors = 0;
|
||||||
|
|
||||||
|
let mut files: Vec<_> = std::fs::read_dir(&results_dir)
|
||||||
|
.map_err(|e| format!("read results dir: {}", e))?
|
||||||
|
.filter_map(|e| e.ok())
|
||||||
|
.filter(|e| e.path().extension().map(|x| x == "json").unwrap_or(false))
|
||||||
|
.collect();
|
||||||
|
files.sort_by_key(|e| e.path());
|
||||||
|
|
||||||
|
for entry in &files {
|
||||||
|
let path = entry.path();
|
||||||
|
let content = match std::fs::read_to_string(&path) {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!(" Skip {}: {}", path.display(), e);
|
||||||
|
errors += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let data: serde_json::Value = match serde_json::from_str(&content) {
|
||||||
|
Ok(d) => d,
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!(" Skip {}: parse error: {}", path.display(), e);
|
||||||
|
errors += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
println!("Processing {}:", path.file_name().unwrap().to_string_lossy());
|
||||||
|
let (a, e) = apply_agent_file(&mut store, &data);
|
||||||
|
applied += a;
|
||||||
|
errors += e;
|
||||||
|
|
||||||
|
if !process_all {
|
||||||
|
let done_dir = crate::util::memory_subdir("agent-results/done")?;
|
||||||
|
let dest = done_dir.join(path.file_name().unwrap());
|
||||||
|
std::fs::rename(&path, &dest).ok();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if applied > 0 {
|
||||||
|
store.save()?;
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("\nApplied {} links ({} errors, {} files processed)",
|
||||||
|
applied, errors, files.len());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cmd_digest(level: DigestLevel) -> Result<(), String> {
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
|
||||||
|
match level {
|
||||||
|
DigestLevel::Auto => digest::digest_auto(&mut store),
|
||||||
|
DigestLevel::Daily { date } => {
|
||||||
|
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
||||||
|
digest::generate(&mut store, "daily", &arg)
|
||||||
|
}
|
||||||
|
DigestLevel::Weekly { date } => {
|
||||||
|
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
||||||
|
digest::generate(&mut store, "weekly", &arg)
|
||||||
|
}
|
||||||
|
DigestLevel::Monthly { date } => {
|
||||||
|
let arg = date.unwrap_or_else(|| store::format_date(store::now_epoch()));
|
||||||
|
digest::generate(&mut store, "monthly", &arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cmd_experience_mine(jsonl_path: Option<String>) -> Result<(), String> {
|
||||||
|
let jsonl_path = match jsonl_path {
|
||||||
|
Some(p) => p,
|
||||||
|
None => find_current_transcript()
|
||||||
|
.ok_or("no JSONL transcripts found")?,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !std::path::Path::new(jsonl_path.as_str()).is_file() {
|
||||||
|
return Err(format!("JSONL not found: {}", jsonl_path));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut store = store::Store::load()?;
|
||||||
|
let count = crate::enrich::experience_mine(&mut store, &jsonl_path, None)?;
|
||||||
|
println!("Done: {} new entries mined.", count);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn cmd_daemon(sub: DaemonCmd) -> Result<(), String> {
|
fn cmd_daemon(sub: DaemonCmd) -> Result<(), String> {
|
||||||
match sub {
|
match sub {
|
||||||
DaemonCmd::Start => daemon::run_daemon(),
|
DaemonCmd::Start => daemon::run_daemon(),
|
||||||
|
|
@ -2128,66 +1584,3 @@ fn cmd_daemon(sub: DaemonCmd) -> Result<(), String> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cmd_knowledge_loop(max_cycles: usize, batch_size: usize, window: usize, max_depth: i32) -> Result<(), String> {
|
|
||||||
let config = knowledge::KnowledgeLoopConfig {
|
|
||||||
max_cycles,
|
|
||||||
batch_size,
|
|
||||||
window,
|
|
||||||
max_depth,
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
let results = knowledge::run_knowledge_loop(&config)?;
|
|
||||||
eprintln!("\nCompleted {} cycles, {} total actions applied",
|
|
||||||
results.len(),
|
|
||||||
results.iter().map(|r| r.total_applied).sum::<usize>());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_fact_mine(path: &str, batch: bool, dry_run: bool, output_file: Option<&str>, min_messages: usize) -> Result<(), String> {
|
|
||||||
let p = std::path::Path::new(path);
|
|
||||||
|
|
||||||
let paths: Vec<std::path::PathBuf> = if batch {
|
|
||||||
if !p.is_dir() {
|
|
||||||
return Err(format!("Not a directory: {}", path));
|
|
||||||
}
|
|
||||||
let mut files: Vec<_> = std::fs::read_dir(p)
|
|
||||||
.map_err(|e| format!("read dir: {}", e))?
|
|
||||||
.filter_map(|e| e.ok())
|
|
||||||
.map(|e| e.path())
|
|
||||||
.filter(|p| p.extension().map(|x| x == "jsonl").unwrap_or(false))
|
|
||||||
.collect();
|
|
||||||
files.sort();
|
|
||||||
eprintln!("Found {} transcripts", files.len());
|
|
||||||
files
|
|
||||||
} else {
|
|
||||||
vec![p.to_path_buf()]
|
|
||||||
};
|
|
||||||
|
|
||||||
let path_refs: Vec<&std::path::Path> = paths.iter().map(|p| p.as_path()).collect();
|
|
||||||
let facts = fact_mine::mine_batch(&path_refs, min_messages, dry_run)?;
|
|
||||||
|
|
||||||
if !dry_run {
|
|
||||||
let json = serde_json::to_string_pretty(&facts)
|
|
||||||
.map_err(|e| format!("serialize: {}", e))?;
|
|
||||||
if let Some(out) = output_file {
|
|
||||||
std::fs::write(out, &json).map_err(|e| format!("write: {}", e))?;
|
|
||||||
eprintln!("\nWrote {} facts to {}", facts.len(), out);
|
|
||||||
} else {
|
|
||||||
println!("{}", json);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
eprintln!("\nTotal: {} facts from {} transcripts", facts.len(), paths.len());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cmd_fact_mine_store(path: &str) -> Result<(), String> {
|
|
||||||
let path = std::path::Path::new(path);
|
|
||||||
if !path.exists() {
|
|
||||||
return Err(format!("File not found: {}", path.display()));
|
|
||||||
}
|
|
||||||
let count = fact_mine::mine_and_store(path, None)?;
|
|
||||||
eprintln!("Stored {} facts", count);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue