Replace rkyv/bincode caching with redb indices
Remove three-tier loading (rkyv snapshot, bincode cache, capnp replay) in favor of direct capnp log replay + redb for indexed access.

- Remove all rkyv derives from types (Node, Relation, enums, etc.)
- Remove Snapshot struct, RKYV_MAGIC, CACHE_MAGIC constants
- Remove load_snapshot_mmap(), save(), save_snapshot()
- Remove MmapView, AnyView from view.rs (keep StoreView trait)
- Simplify Store::load() to just replay capnp logs
- Add db.rs with redb schema: nodes, uuid_to_key, visits, transcript_progress
- Simplify cmd_fsck to just check capnp integrity + graph health

capnp logs remain source of truth; redb indices will be rebuilt on demand.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
parent
1d88293ccf
commit
2caccf875d
8 changed files with 201 additions and 636 deletions
|
|
@@ -60,43 +60,12 @@ pub async fn cmd_init() -> Result<()> {
|
|||
}
|
||||
|
||||
pub async fn cmd_fsck() -> Result<()> {
|
||||
// Check/repair capnp log integrity first
|
||||
store::fsck()?;
|
||||
|
||||
let arc = memory::access_local()?;
|
||||
let mut store = arc.lock().await;
|
||||
|
||||
// Check cache vs log consistency
|
||||
let log_store = store::Store::load_from_logs()?;
|
||||
let mut cache_issues = 0;
|
||||
|
||||
// Nodes in logs but missing from cache
|
||||
for key in log_store.nodes.keys() {
|
||||
if !store.nodes.contains_key(key) {
|
||||
eprintln!("CACHE MISSING: '{}' exists in capnp log but not in cache", key);
|
||||
cache_issues += 1;
|
||||
}
|
||||
}
|
||||
// Nodes in cache but not in logs (phantom nodes)
|
||||
for key in store.nodes.keys() {
|
||||
if !log_store.nodes.contains_key(key) {
|
||||
eprintln!("CACHE PHANTOM: '{}' exists in cache but not in capnp log", key);
|
||||
cache_issues += 1;
|
||||
}
|
||||
}
|
||||
// Version mismatches
|
||||
for (key, log_node) in &log_store.nodes {
|
||||
if let Some(cache_node) = store.nodes.get(key)
|
||||
&& cache_node.version != log_node.version {
|
||||
eprintln!("CACHE STALE: '{}' cache v{} vs log v{}",
|
||||
key, cache_node.version, log_node.version);
|
||||
cache_issues += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if cache_issues > 0 {
|
||||
eprintln!("{} cache inconsistencies found — rebuilding from logs", cache_issues);
|
||||
*store = log_store;
|
||||
store.save().context("rebuild save")?;
|
||||
}
|
||||
|
||||
// Check node-key consistency
|
||||
let mut issues = 0;
|
||||
for (key, node) in &store.nodes {
|
||||
|
|
@@ -141,13 +110,12 @@ pub async fn cmd_fsck() -> Result<()> {
|
|||
r.version = t.version;
|
||||
}
|
||||
}
|
||||
store.save()?;
|
||||
eprintln!("Pruned {} orphan edges", count);
|
||||
}
|
||||
|
||||
let g = store.build_graph();
|
||||
println!("fsck: {} nodes, {} edges, {} issues, {} dangling, {} cache",
|
||||
store.nodes.len(), g.edge_count(), issues, dangling, cache_issues);
|
||||
println!("fsck: {} nodes, {} edges, {} issues, {} dangling",
|
||||
store.nodes.len(), g.edge_count(), issues, dangling);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue