store: wire up RELS index for relations

Complete redb schema with bidirectional relation indexing:
- RELS multimap: uuid → packed(other_uuid, strength, rel_type, is_outgoing)
- Each edge stored twice (once per endpoint) with direction bit
- pack_rel/unpack_rel for 22-byte packed format

Wired up:
- replay_relations indexes all relations on load
- add_relation indexes new relations
- for_each_relation reads from index (graph building)
- add_link uses index for existence check
- set_link_strength finds/updates edges via index
- cap_degree uses index for degree counting and pruning
- rename_node finds edges by uuid

Vec<Relation> still maintained for remaining uses (normalize_strengths,
graph_health diagnostics). To be removed in follow-up.

Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2026-04-13 21:12:47 -04:00
parent 8cfe9a4d70
commit 5fe51fbfda
5 changed files with 371 additions and 142 deletions

View file

@ -362,6 +362,13 @@ impl Store {
self.relations = by_uuid.into_values()
.filter(|r| !r.deleted)
.collect();
// Index relations in redb
if let Some(db) = &self.db {
for rel in &self.relations {
index::index_relation(db, &rel.source, &rel.target, rel.strength, rel.rel_type as u8)?;
}
}
Ok(())
}
@ -629,13 +636,16 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
let txn = database.begin_write()?;
{
let mut nodes_table = txn.open_table(index::NODES)?;
let mut uuid_table = txn.open_table(index::UUID_TO_KEY)?;
let mut key_uuid_table = txn.open_table(index::KEY_TO_UUID)?;
let mut uuid_offsets = txn.open_multimap_table(index::UUID_OFFSETS)?;
for (key, (offset, uuid, _, deleted)) in latest {
if !deleted {
nodes_table.insert(key.as_str(), offset)?;
uuid_table.insert(uuid.as_slice(), key.as_str())?;
key_uuid_table.insert(key.as_str(), uuid.as_slice())?;
}
// Always record offset in UUID history (even for deleted)
uuid_offsets.insert(uuid.as_slice(), offset)?;
}
}
txn.commit()?;

View file

@ -1,19 +1,36 @@
// redb index tables
//
// capnp logs are source of truth; redb provides indexed access.
// Tables:
// nodes: key → offset in capnp log (u64)
// uuid_to_key: [u8;16] → key
//
// To read a node: lookup offset in redb, seek in capnp file, deserialize.
// Node tables:
// NODES: key → offset (current version)
// KEY_TO_UUID: key → uuid
// UUID_OFFSETS: uuid → offsets (multimap, all versions)
// NODES_BY_PROVENANCE: provenance → keys (multimap)
// NODES_BY_TYPE: [type_byte][timestamp_be] → key (for range queries by type+date)
//
// Relation tables:
// RELS: node_uuid → (other_uuid, strength, rel_type, is_outgoing) packed (multimap)
// Each relation stored twice — once per endpoint with direction bit.
//
// To get key from uuid: UUID_OFFSETS → read_node_at_offset() → node.key
use anyhow::{Context, Result};
use redb::{Database, ReadableDatabase, ReadableTable, TableDefinition};
use redb::{Database, MultimapTableDefinition, ReadableDatabase, ReadableTable, TableDefinition};
use std::path::Path;
// Table definitions - nodes maps key to byte offset in capnp log
// Node tables
pub const NODES: TableDefinition<&str, u64> = TableDefinition::new("nodes");
pub const UUID_TO_KEY: TableDefinition<&[u8], &str> = TableDefinition::new("uuid_to_key");
pub const KEY_TO_UUID: TableDefinition<&str, &[u8]> = TableDefinition::new("key_to_uuid");
pub const UUID_OFFSETS: MultimapTableDefinition<&[u8], u64> = MultimapTableDefinition::new("uuid_offsets");
pub const NODES_BY_PROVENANCE: MultimapTableDefinition<&str, &str> = MultimapTableDefinition::new("nodes_by_provenance");
// Composite key: [node_type: u8][timestamp: i64 BE] for range queries
pub const NODES_BY_TYPE: TableDefinition<&[u8], &str> = TableDefinition::new("nodes_by_type");
// Relations table - each relation stored twice (once per endpoint)
// Value: (other_uuid: [u8;16], strength: f32, rel_type: u8, is_outgoing: bool)
// Packed as 22 bytes: [other_uuid:16][strength:4][rel_type:1][is_outgoing:1]
pub const RELS: MultimapTableDefinition<&[u8], &[u8]> = MultimapTableDefinition::new("rels");
/// Open or create the redb database, ensuring all tables exist.
pub fn open_db(path: &Path) -> Result<Database> {
@ -23,8 +40,14 @@ pub fn open_db(path: &Path) -> Result<Database> {
// Ensure tables exist by opening a write transaction
let txn = db.begin_write()?;
{
// Node tables
let _ = txn.open_table(NODES)?;
let _ = txn.open_table(UUID_TO_KEY)?;
let _ = txn.open_table(KEY_TO_UUID)?;
let _ = txn.open_multimap_table(UUID_OFFSETS)?;
let _ = txn.open_multimap_table(NODES_BY_PROVENANCE)?;
let _ = txn.open_table(NODES_BY_TYPE)?;
// Relations
let _ = txn.open_multimap_table(RELS)?;
}
txn.commit()?;
@ -36,10 +59,12 @@ pub fn index_node(db: &Database, key: &str, offset: u64, uuid: &[u8; 16]) -> Res
let txn = db.begin_write()?;
{
let mut nodes_table = txn.open_table(NODES)?;
let mut uuid_table = txn.open_table(UUID_TO_KEY)?;
let mut key_uuid_table = txn.open_table(KEY_TO_UUID)?;
let mut uuid_offsets = txn.open_multimap_table(UUID_OFFSETS)?;
nodes_table.insert(key, offset)?;
uuid_table.insert(uuid.as_slice(), key)?;
key_uuid_table.insert(key, uuid.as_slice())?;
uuid_offsets.insert(uuid.as_slice(), offset)?;
}
txn.commit()?;
Ok(())
@ -59,15 +84,31 @@ pub fn contains_key(db: &Database, key: &str) -> Result<bool> {
Ok(table.get(key)?.is_some())
}
/// Remove a node from the index.
pub fn remove_node(db: &Database, key: &str, uuid: &[u8; 16]) -> Result<()> {
/// Get a node's UUID from its key.
pub fn get_uuid_for_key(db: &Database, key: &str) -> Result<Option<[u8; 16]>> {
let txn = db.begin_read()?;
let table = txn.open_table(KEY_TO_UUID)?;
match table.get(key)? {
Some(uuid) => {
let slice = uuid.value();
let mut arr = [0u8; 16];
arr.copy_from_slice(slice);
Ok(Some(arr))
}
None => Ok(None),
}
}
/// Remove a node from the index (key mappings only; UUID history preserved).
pub fn remove_node(db: &Database, key: &str, _uuid: &[u8; 16]) -> Result<()> {
let txn = db.begin_write()?;
{
let mut nodes_table = txn.open_table(NODES)?;
let mut uuid_table = txn.open_table(UUID_TO_KEY)?;
let mut key_uuid_table = txn.open_table(KEY_TO_UUID)?;
// Note: UUID_OFFSETS is not cleared - preserves version history
nodes_table.remove(key)?;
uuid_table.remove(uuid.as_slice())?;
key_uuid_table.remove(key)?;
}
txn.commit()?;
Ok(())
@ -84,3 +125,89 @@ pub fn all_keys(db: &Database) -> Result<Vec<String>> {
}
Ok(keys)
}
// ── Relation index operations ──────────────────────────────────────
//
// RELS value format: [other_uuid:16][strength:4][rel_type:1][is_outgoing:1] = 22 bytes
/// Pack relation data into the 22-byte RELS value format:
/// `[other_uuid:16][strength:4 BE][rel_type:1][is_outgoing:1]`.
fn pack_rel(other_uuid: &[u8; 16], strength: f32, rel_type: u8, is_outgoing: bool) -> [u8; 22] {
    let mut packed = [0u8; 22];
    packed[..16].copy_from_slice(other_uuid);
    packed[16..20].copy_from_slice(&strength.to_be_bytes());
    packed[20] = rel_type;
    packed[21] = u8::from(is_outgoing);
    packed
}
/// Unpack a RELS value into `(other_uuid, strength, rel_type, is_outgoing)`.
///
/// Inverse of `pack_rel`.
///
/// # Panics
/// Panics if `data` is shorter than 22 bytes.
pub fn unpack_rel(data: &[u8]) -> ([u8; 16], f32, u8, bool) {
    let mut other = [0u8; 16];
    other.copy_from_slice(&data[..16]);
    let mut strength_be = [0u8; 4];
    strength_be.copy_from_slice(&data[16..20]);
    (other, f32::from_be_bytes(strength_be), data[20], data[21] != 0)
}
/// Index a relation: store twice (once per endpoint).
///
/// The edge is written under both endpoint UUIDs so a single multimap
/// read from either side finds it; the trailing direction bit
/// distinguishes the outgoing copy from the incoming one.
pub fn index_relation(
    db: &Database,
    source_uuid: &[u8; 16],
    target_uuid: &[u8; 16],
    strength: f32,
    rel_type: u8,
) -> Result<()> {
    let txn = db.begin_write()?;
    {
        let mut table = txn.open_multimap_table(RELS)?;
        // source → (target, strength, type, outgoing=true)
        let forward = pack_rel(target_uuid, strength, rel_type, true);
        // target → (source, strength, type, outgoing=false)
        let backward = pack_rel(source_uuid, strength, rel_type, false);
        table.insert(source_uuid.as_slice(), forward.as_slice())?;
        table.insert(target_uuid.as_slice(), backward.as_slice())?;
    }
    txn.commit()?;
    Ok(())
}
/// Remove a relation from the index.
///
/// Deletes both stored copies of the edge: the outgoing entry under the
/// source UUID and the incoming entry under the target UUID. The packed
/// value must match exactly (same strength and rel_type), since the
/// multimap removes by (key, value) pair.
pub fn remove_relation(
    db: &Database,
    source_uuid: &[u8; 16],
    target_uuid: &[u8; 16],
    strength: f32,
    rel_type: u8,
) -> Result<()> {
    let txn = db.begin_write()?;
    {
        let mut table = txn.open_multimap_table(RELS)?;
        let forward = pack_rel(target_uuid, strength, rel_type, true);
        let backward = pack_rel(source_uuid, strength, rel_type, false);
        table.remove(source_uuid.as_slice(), forward.as_slice())?;
        table.remove(target_uuid.as_slice(), backward.as_slice())?;
    }
    txn.commit()?;
    Ok(())
}
/// Get all edges for a node. Returns (other_uuid, strength, rel_type, is_outgoing).
pub fn edges_for_node(db: &Database, node_uuid: &[u8; 16]) -> Result<Vec<([u8; 16], f32, u8, bool)>> {
let txn = db.begin_read()?;
let rels = txn.open_multimap_table(RELS)?;
let mut edges = Vec::new();
for entry in rels.get(node_uuid.as_slice())? {
let guard = entry?;
let slice = guard.value();
let mut data = [0u8; 22];
data.copy_from_slice(slice);
edges.push(unpack_rel(&data));
}
Ok(edges)
}

View file

@ -28,9 +28,12 @@ impl Store {
Ok(())
}
/// Add a relation (appends to log + updates cache)
/// Add a relation (appends to log + updates cache + indexes)
///
/// Order matters: the append-only log (source of truth) is written first;
/// only after it succeeds is the edge mirrored into the redb RELS index
/// and pushed onto the in-memory `relations` cache.
/// NOTE(review): if `index_relation` fails after a successful log append,
/// the cache push is also skipped, so log and index/cache can diverge
/// until the next reload — confirm replay re-indexes on load.
pub fn add_relation(&mut self, rel: Relation) -> Result<()> {
// Durable log write first.
self.append_relations(std::slice::from_ref(&rel))?;
// Mirror into the index only when a redb handle is open.
if let Some(db) = &self.db {
index::index_relation(db, &rel.source, &rel.target, rel.strength, rel.rel_type as u8)?;
}
self.relations.push(rel);
Ok(())
}
@ -148,36 +151,46 @@ impl Store {
tombstone.provenance = prov;
tombstone.timestamp = now_epoch();
// Collect affected relations and update their debug key strings
let updated_rels: Vec<_> = self.relations.iter()
.filter(|r| r.source_key == old_key || r.target_key == old_key)
.map(|r| {
let mut r = r.clone();
r.version += 1;
if r.source_key == old_key { r.source_key = new_key.to_string(); }
if r.target_key == old_key { r.target_key = new_key.to_string(); }
r
})
.collect();
// Persist under single lock
// Persist node changes
let offset = self.append_nodes(&[renamed.clone(), tombstone.clone()])?;
if !updated_rels.is_empty() {
self.append_relations(&updated_rels)?;
}
// Update index: remove old key, add renamed
// Update node index: remove old key, add renamed
if let Some(ref database) = self.db {
index::remove_node(database, old_key, &tombstone.uuid)?;
index::index_node(database, new_key, offset, &renamed.uuid)?;
// Find relations touching this node's UUID and update their key strings
let node_uuid = node.uuid;
let edges = index::edges_for_node(database, &node_uuid)?;
// Build uuid → key map for the other endpoints
let keys = index::all_keys(database)?;
let mut uuid_to_key: HashMap<[u8; 16], String> = HashMap::new();
for k in &keys {
if let Ok(Some(u)) = index::get_uuid_for_key(database, k) {
uuid_to_key.insert(u, k.clone());
}
}
// Update the renamed node's mapping
uuid_to_key.insert(node_uuid, new_key.to_string());
let mut updated_rels = Vec::new();
for (other_uuid, strength, rel_type, is_outgoing) in edges {
let other_key = uuid_to_key.get(&other_uuid).cloned().unwrap_or_default();
let (src_uuid, tgt_uuid, src_key, tgt_key) = if is_outgoing {
(node_uuid, other_uuid, new_key.to_string(), other_key)
} else {
(other_uuid, node_uuid, other_key, new_key.to_string())
};
let mut rel = new_relation(src_uuid, tgt_uuid,
RelationType::from_u8(rel_type), strength,
&src_key, &tgt_key);
rel.version = 2; // indicate update
updated_rels.push(rel);
}
// Update in-memory relations cache
for updated in &updated_rels {
if let Some(r) = self.relations.iter_mut().find(|r| r.uuid == updated.uuid) {
r.source_key = updated.source_key.clone();
r.target_key = updated.target_key.clone();
r.version = updated.version;
if !updated_rels.is_empty() {
self.append_relations(&updated_rels)?;
}
}
@ -186,81 +199,114 @@ impl Store {
/// Cap node degree by soft-deleting edges from mega-hubs.
pub fn cap_degree(&mut self, max_degree: usize) -> Result<(usize, usize)> {
let db = self.db.as_ref().ok_or_else(|| anyhow!("store not loaded"))?;
let keys = index::all_keys(db)?;
// Build uuid ↔ key maps
let mut uuid_to_key: HashMap<[u8; 16], String> = HashMap::new();
let mut key_to_uuid: HashMap<String, [u8; 16]> = HashMap::new();
for key in &keys {
if let Ok(Some(uuid)) = index::get_uuid_for_key(db, key) {
uuid_to_key.insert(uuid, key.clone());
key_to_uuid.insert(key.clone(), uuid);
}
}
// Count degrees per node
let mut node_degree: HashMap<String, usize> = HashMap::new();
for rel in &self.relations {
if rel.deleted { continue; }
*node_degree.entry(rel.source_key.clone()).or_default() += 1;
*node_degree.entry(rel.target_key.clone()).or_default() += 1;
for key in &keys {
let uuid = match key_to_uuid.get(key) {
Some(u) => u,
None => continue,
};
let edges = index::edges_for_node(db, uuid)?;
node_degree.insert(key.clone(), edges.len());
}
let mut node_edges: HashMap<String, Vec<usize>> = HashMap::new();
for (i, rel) in self.relations.iter().enumerate() {
if rel.deleted { continue; }
node_edges.entry(rel.source_key.clone()).or_default().push(i);
node_edges.entry(rel.target_key.clone()).or_default().push(i);
}
let mut to_delete: HashSet<usize> = HashSet::new();
let mut to_delete: HashSet<([u8; 16], [u8; 16])> = HashSet::new();
let mut hubs_capped = 0;
for (_key, edge_indices) in &node_edges {
let active: Vec<usize> = edge_indices.iter()
.filter(|&&i| !to_delete.contains(&i))
.copied()
.collect();
if active.len() <= max_degree { continue; }
let mut auto_indices: Vec<(usize, f32)> = Vec::new();
let mut link_indices: Vec<(usize, usize)> = Vec::new();
for &i in &active {
let rel = &self.relations[i];
if rel.rel_type == RelationType::Auto {
auto_indices.push((i, rel.strength));
} else {
let other = if &rel.source_key == _key {
&rel.target_key
} else {
&rel.source_key
for key in &keys {
let uuid = match key_to_uuid.get(key) {
Some(u) => *u,
None => continue,
};
let other_deg = node_degree.get(other).copied().unwrap_or(0);
link_indices.push((i, other_deg));
let edges = index::edges_for_node(db, &uuid)?;
if edges.len() <= max_degree { continue; }
// Separate auto vs manual edges: (source, target, sort_key)
let mut auto_edges: Vec<([u8; 16], [u8; 16], f32)> = Vec::new();
let mut link_edges: Vec<([u8; 16], [u8; 16], usize)> = Vec::new();
for (other_uuid, strength, rel_type, is_outgoing) in &edges {
// Canonical edge direction (source < target by outgoing flag)
let (src, tgt) = if *is_outgoing { (uuid, *other_uuid) } else { (*other_uuid, uuid) };
if to_delete.contains(&(src, tgt)) { continue; }
let other_key = match uuid_to_key.get(other_uuid) {
Some(k) => k,
None => continue,
};
if *rel_type == RelationType::Auto as u8 {
auto_edges.push((src, tgt, *strength));
} else {
let other_deg = node_degree.get(other_key).copied().unwrap_or(0);
link_edges.push((src, tgt, other_deg));
}
}
let excess = active.len() - max_degree;
let active_count = auto_edges.len() + link_edges.len();
if active_count <= max_degree { continue; }
auto_indices.sort_by(|a, b| a.1.total_cmp(&b.1));
let auto_prune = excess.min(auto_indices.len());
for &(i, _) in auto_indices.iter().take(auto_prune) {
to_delete.insert(i);
let excess = active_count - max_degree;
// Prune weakest auto edges first
auto_edges.sort_by(|a, b| a.2.total_cmp(&b.2));
for (src, tgt, _) in auto_edges.iter().take(excess) {
to_delete.insert((*src, *tgt));
}
let remaining_excess = excess.saturating_sub(auto_prune);
if remaining_excess > 0 {
link_indices.sort_by(|a, b| b.1.cmp(&a.1));
let link_prune = remaining_excess.min(link_indices.len());
for &(i, _) in link_indices.iter().take(link_prune) {
to_delete.insert(i);
// Then prune links to highest-degree nodes
let remaining = excess.saturating_sub(auto_edges.len());
if remaining > 0 {
link_edges.sort_by(|a, b| b.2.cmp(&a.2));
for (src, tgt, _) in link_edges.iter().take(remaining) {
to_delete.insert((*src, *tgt));
}
}
hubs_capped += 1;
}
let mut pruned_rels = Vec::new();
for &i in &to_delete {
self.relations[i].deleted = true;
self.relations[i].version += 1;
pruned_rels.push(self.relations[i].clone());
// Collect edge info for deletion
let mut to_remove: Vec<([u8; 16], [u8; 16], f32, u8, String, String)> = Vec::new();
for (source_uuid, target_uuid) in &to_delete {
let edges = index::edges_for_node(db, source_uuid)?;
if let Some((_, strength, rel_type, _)) = edges.iter()
.find(|(other, _, _, out)| *other == *target_uuid && *out)
{
let source_key = uuid_to_key.get(source_uuid).cloned().unwrap_or_default();
let target_key = uuid_to_key.get(target_uuid).cloned().unwrap_or_default();
to_remove.push((*source_uuid, *target_uuid, *strength, *rel_type, source_key, target_key));
}
}
if !pruned_rels.is_empty() {
self.append_relations(&pruned_rels)?;
// Now mutate: remove from index and persist tombstones
let pruned_count = to_remove.len();
for (source_uuid, target_uuid, strength, rel_type, source_key, target_key) in to_remove {
if let Some(db) = &self.db {
index::remove_relation(db, &source_uuid, &target_uuid, strength, rel_type)?;
}
let mut rel = new_relation(source_uuid, target_uuid,
RelationType::from_u8(rel_type), strength,
&source_key, &target_key);
rel.deleted = true;
rel.version = 2;
self.append_relations(std::slice::from_ref(&rel))?;
}
self.relations.retain(|r| !r.deleted);
Ok((hubs_capped, to_delete.len()))
Ok((hubs_capped, pruned_count))
}
/// Set a node's weight directly. Returns (old, new).
@ -282,54 +328,10 @@ impl Store {
Ok((old, weight))
}
/// Set the strength of a link between two nodes. Deduplicates if
/// multiple links exist. Returns the old strength, or error if no link.
/// Set the strength of a link between two nodes.
/// Returns the old strength. Creates link if it doesn't exist.
pub fn set_link_strength(&mut self, source: &str, target: &str, strength: f32) -> Result<f32> {
let strength = strength.clamp(0.01, 1.0);
let mut old = 0.0f32;
let mut found = false;
let mut first = true;
for rel in &mut self.relations {
if rel.deleted { continue; }
if (rel.source_key == source && rel.target_key == target)
|| (rel.source_key == target && rel.target_key == source)
{
if first {
old = rel.strength;
rel.strength = strength;
first = false;
} else {
rel.deleted = true; // deduplicate
}
found = true;
}
}
if !found {
// Upsert: create the link if it doesn't exist
self.add_link(source, target, "link_set")?;
// Set the strength on the newly created link
for rel in self.relations.iter_mut().rev() {
if !rel.deleted && rel.source_key == source && rel.target_key == target {
rel.strength = strength;
break;
}
}
return Ok(0.0);
}
Ok(old)
}
/// Add a link between two nodes with Jaccard-based initial strength.
/// Returns the strength, or a message if the link already exists.
pub fn add_link(&mut self, source: &str, target: &str, provenance: &str) -> Result<f32> {
// Check for existing
let exists = self.relations.iter().any(|r|
!r.deleted &&
((r.source_key == source && r.target_key == target) ||
(r.source_key == target && r.target_key == source)));
if exists {
bail!("link already exists: {} ↔ {}", source, target);
}
let source_uuid = self.get_node(source)?
.map(|n| n.uuid)
@ -338,6 +340,53 @@ impl Store {
.map(|n| n.uuid)
.ok_or_else(|| anyhow!("target not found: {}", target))?;
// Find existing edge via index
let db = self.db.as_ref().ok_or_else(|| anyhow!("store not loaded"))?;
let edges = index::edges_for_node(db, &source_uuid)?;
let existing = edges.iter().find(|(other, _, _, _)| *other == target_uuid);
if let Some((_, old_strength, rel_type, _)) = existing {
let old = *old_strength;
// Remove old edge from index, add updated one
index::remove_relation(db, &source_uuid, &target_uuid, old, *rel_type)?;
index::index_relation(db, &source_uuid, &target_uuid, strength, *rel_type)?;
// Append updated relation to log
let mut rel = new_relation(source_uuid, target_uuid,
RelationType::from_u8(*rel_type), strength, source, target);
rel.version = 2; // indicate update
self.append_relations(std::slice::from_ref(&rel))?;
Ok(old)
} else {
// Create new link
self.add_link(source, target, "link_set")?;
// Update its strength
let db = self.db.as_ref().ok_or_else(|| anyhow!("store not loaded"))?;
index::remove_relation(db, &source_uuid, &target_uuid, 0.1, RelationType::Link as u8)?;
index::index_relation(db, &source_uuid, &target_uuid, strength, RelationType::Link as u8)?;
Ok(0.0)
}
}
/// Add a link between two nodes with Jaccard-based initial strength.
/// Returns the strength, or a message if the link already exists.
pub fn add_link(&mut self, source: &str, target: &str, provenance: &str) -> Result<f32> {
let source_uuid = self.get_node(source)?
.map(|n| n.uuid)
.ok_or_else(|| anyhow!("source not found: {}", source))?;
let target_uuid = self.get_node(target)?
.map(|n| n.uuid)
.ok_or_else(|| anyhow!("target not found: {}", target))?;
// Check for existing via index
if let Some(db) = &self.db {
let edges = index::edges_for_node(db, &source_uuid)?;
let exists = edges.iter().any(|(other, _, _, _)| *other == target_uuid);
if exists {
bail!("link already exists: {} ↔ {}", source, target);
}
}
let graph = self.build_graph();
let jaccard = graph.jaccard(source, target);
let strength = (jaccard * 3.0).clamp(0.1, 1.0) as f32;

View file

@ -150,6 +150,16 @@ pub enum RelationType {
Auto,
}
impl RelationType {
    /// Decode a persisted relation-type byte.
    ///
    /// `1` → `Causal`, `2` → `Auto`; any other byte maps to `Link`.
    pub fn from_u8(v: u8) -> Self {
        match v {
            2 => RelationType::Auto,
            1 => RelationType::Causal,
            _ => RelationType::Link,
        }
    }
}
/// Create a new node with defaults
pub fn new_node(key: &str, content: &str) -> Node {
Node {

View file

@ -59,9 +59,42 @@ impl StoreView for Store {
}
fn for_each_relation<F: FnMut(&str, &str, f32, RelationType)>(&self, mut f: F) {
for rel in &self.relations {
if rel.deleted { continue; }
f(&rel.source_key, &rel.target_key, rel.strength, rel.rel_type);
let db = match self.db.as_ref() {
Some(db) => db,
None => return,
};
// Build uuid → key map by iterating all nodes once
let mut uuid_to_key: std::collections::HashMap<[u8; 16], String> = std::collections::HashMap::new();
let keys = match index::all_keys(db) {
Ok(keys) => keys,
Err(_) => return,
};
for key in &keys {
if let Ok(Some(uuid)) = index::get_uuid_for_key(db, key) {
uuid_to_key.insert(uuid, key.clone());
}
}
// Iterate edges: only process outgoing to avoid duplicates
for key in &keys {
let uuid = match index::get_uuid_for_key(db, key) {
Ok(Some(u)) => u,
_ => continue,
};
let edges = match index::edges_for_node(db, &uuid) {
Ok(e) => e,
Err(_) => continue,
};
for (other_uuid, strength, rel_type_byte, is_outgoing) in edges {
if !is_outgoing { continue; } // only process outgoing
let target_key = match uuid_to_key.get(&other_uuid) {
Some(k) => k,
None => continue, // orphan edge
};
let rel_type = RelationType::from_u8(rel_type_byte);
f(key, target_key, strength, rel_type);
}
}
}