graph: use index for bulk reads, skip capnp deserialization

- Add all_keys() to StoreView, use in build_adjacency instead of
  for_each_node (which was ignoring content/weight anyway)
- Add all_key_uuid_pairs() for single-pass uuid mapping
- Extend KEY_TO_UUID to store [uuid:16][node_type:1][timestamp:8]
- for_each_node_meta now reads from index, no capnp needed
- Add NodeType::from_u8() for unpacking

Graph health: 7s → 2s (3.5x faster)

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-13 22:11:50 -04:00
parent b3d0a3ab25
commit faad14dc95
6 changed files with 103 additions and 40 deletions

View file

@@ -598,8 +598,8 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
return Ok(database);
}
// Track latest (offset, uuid, version, deleted) per key
let mut latest: HashMap<String, (u64, [u8; 16], u32, bool)> = HashMap::new();
// Track latest (offset, uuid, version, deleted, node_type, timestamp) per key
let mut latest: HashMap<String, (u64, [u8; 16], u32, bool, u8, i64)> = HashMap::new();
let file = fs::File::open(capnp_path)
.with_context(|| format!("open {}", capnp_path.display()))?;
@@ -630,6 +630,10 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
let version = node_reader.get_version();
let deleted = node_reader.get_deleted();
let node_type = node_reader.get_node_type()
.map(|t| t as u8)
.unwrap_or(0);
let timestamp = node_reader.get_timestamp();
let mut uuid = [0u8; 16];
if let Ok(data) = node_reader.get_uuid() {
@@ -640,10 +644,10 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
// Keep if newer version
let dominated = latest.get(&key)
.map(|(_, _, v, _)| version >= *v)
.map(|(_, _, v, _, _, _)| version >= *v)
.unwrap_or(true);
if dominated {
latest.insert(key, (offset, uuid, version, deleted));
latest.insert(key, (offset, uuid, version, deleted, node_type, timestamp));
}
}
}
@@ -656,10 +660,15 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
let mut key_uuid_table = txn.open_table(index::KEY_TO_UUID)?;
let mut uuid_offsets = txn.open_multimap_table(index::UUID_OFFSETS)?;
for (key, (offset, uuid, _, deleted)) in latest {
for (key, (offset, uuid, _, deleted, node_type, timestamp)) in latest {
if !deleted {
nodes_table.insert(key.as_str(), offset)?;
key_uuid_table.insert(key.as_str(), uuid.as_slice())?;
// Pack: [uuid:16][node_type:1][timestamp:8] = 25 bytes
let mut packed = [0u8; 25];
packed[0..16].copy_from_slice(&uuid);
packed[16] = node_type;
packed[17..25].copy_from_slice(&timestamp.to_be_bytes());
key_uuid_table.insert(key.as_str(), packed.as_slice())?;
}
// Always record offset in UUID history (even for deleted)
uuid_offsets.insert(uuid.as_slice(), offset)?;