drop strip_md_keys()

This commit is contained in:
Kent Overstreet 2026-04-05 01:18:05 -04:00
parent 40ecd63099
commit 59aaaa5742
2 changed files with 0 additions and 109 deletions

View file

@@ -36,7 +36,6 @@ pub use types::{
pub use parse::{MemoryUnit, parse_units};
pub use view::{StoreView, AnyView};
pub use persist::fsck;
pub use persist::strip_md_keys;
pub use ops::current_provenance;
use crate::graph::{self, Graph};

View file

@@ -718,114 +718,6 @@ impl Store {
}
}
/// Strip the `.md` suffix from all node keys and relation key strings.
///
/// Nodes whose key ends in `.md` (or contains `.md#`, for anchored keys)
/// are re-inserted under the stripped key with a bumped version. When both
/// the bare key and the `.md` key exist, the duplicates are merged by
/// keeping whichever node has the higher version.
///
/// Migrated nodes are APPENDED to the existing log — history is preserved,
/// the log is never rewritten — and the state/snapshot caches are removed
/// so the next load replays from the logs.
///
/// # Errors
///
/// Returns `Err(String)` if the store cannot be loaded or the migrated
/// nodes cannot be appended to the log.
pub fn strip_md_keys() -> Result<(), String> {
    use super::strip_md_suffix;
    let mut store = Store::load()?;
    let mut renamed_nodes = 0usize;
    let mut renamed_rels = 0usize;
    let mut merged = 0usize;
    // Collect the keys needing renaming up front: `store.nodes` cannot be
    // mutated while iterating over its keys.
    let old_keys: Vec<String> = store.nodes.keys()
        .filter(|k| k.ends_with(".md") || k.contains(".md#"))
        .cloned()
        .collect();
    for old_key in &old_keys {
        let new_key = strip_md_suffix(old_key);
        if new_key == *old_key { continue; }
        // `old_key` was taken from `store.nodes` above, so remove() cannot
        // fail here.
        let mut node = store.nodes.remove(old_key).unwrap();
        store.uuid_to_key.remove(&node.uuid);
        if let Some(existing) = store.nodes.get(&new_key) {
            // Merge: keep whichever node has the higher version.
            if existing.version >= node.version {
                // Fixed diagnostic: the keys were previously printed as
                // "{}{}" — concatenated with no separator.
                eprintln!(" merge {} -> {} (keeping existing v{})",
                    old_key, new_key, existing.version);
                merged += 1;
                continue;
            }
            eprintln!(" merge {} -> {} (replacing v{} with v{})",
                old_key, new_key, existing.version, node.version);
            merged += 1;
        }
        node.key = new_key.clone();
        node.version += 1;
        store.uuid_to_key.insert(node.uuid, new_key.clone());
        store.nodes.insert(new_key, node);
        renamed_nodes += 1;
    }
    // Rewrite relation endpoint key strings the same way.
    // NOTE(review): renamed relations are mutated in memory only — nothing
    // below appends them to a log. Verify relations are persisted elsewhere,
    // or these renames are lost when the caches are invalidated.
    for rel in &mut store.relations {
        let new_source = strip_md_suffix(&rel.source_key);
        let new_target = strip_md_suffix(&rel.target_key);
        if new_source != rel.source_key || new_target != rel.target_key {
            rel.source_key = new_source;
            rel.target_key = new_target;
            rel.version += 1;
            renamed_rels += 1;
        }
    }
    if renamed_nodes == 0 && renamed_rels == 0 && merged == 0 {
        eprintln!("No .md suffixes found — store is clean");
        return Ok(());
    }
    eprintln!("Renamed {} nodes, {} relations, merged {} duplicates",
        renamed_nodes, renamed_rels, merged);
    // Append the migrated nodes to the log (preserving history).
    // NOTE(review): no delete markers are appended for the old `.md` keys,
    // so a replay from the raw logs may resurrect them unless replay
    // resolves entries by uuid — confirm against the replay logic.
    let changed_nodes: Vec<_> = old_keys.iter()
        .filter_map(|old_key| {
            let new_key = strip_md_suffix(old_key);
            store.nodes.get(&new_key).cloned()
        })
        .collect();
    if !changed_nodes.is_empty() {
        store.append_nodes(&changed_nodes)?;
    }
    // Invalidate the cached state/snapshot so the next load replays from
    // the (now-updated) logs instead of trusting stale caches. Removal
    // failures are deliberately ignored (best-effort cleanup).
    for p in [state_path(), snapshot_path()] {
        if p.exists() {
            fs::remove_file(&p).ok();
        }
    }
    eprintln!("Migration complete (appended to existing logs)");
    Ok(())
}
// DO NOT USE. This function destroyed the append-only log history on
// 2026-03-14 when strip_md_keys() called it. It:
//
// 1. Truncates nodes.capnp via File::create() — all historical
// versions of every node are permanently lost
// 2. Writes only from the in-memory store — so any node missing
// due to a loading bug is also permanently lost
// 3. Makes no backup of the old log before overwriting
// 4. Filters out deleted relations, destroying deletion history
//
// The correct approach for migrations is to APPEND new versions
// (with updated keys) and delete markers (for old keys) to the
// existing log, preserving all history.
//
// This function is kept (dead) so the comment survives as a warning.
// If you need log compaction in the future, design it properly:
// back up first, preserve history, and never write from a potentially
// incomplete in-memory snapshot.
/// Permanently disabled stub; any call panics unconditionally (see the
/// warning above for why this must never be re-enabled).
#[allow(dead_code)]
fn _rewrite_store_disabled(_store: &Store) -> Result<(), String> {
panic!("rewrite_store is disabled — see comment above");
}
/// Check and repair corrupt capnp log files.
///
/// Reads each message sequentially, tracking file position. On the first