forked from kent/consciousness
index: add NODES_BY_PROVENANCE with timestamp-sorted values
- Store [negated_timestamp:8][key] as value for descending sort
- recent_by_provenance uses index directly, no capnp reads
- Eliminates 24k×5 capnp reads from subconscious snapshots

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
parent
a966dd9d5d
commit
19789b7e74
3 changed files with 70 additions and 31 deletions
|
|
@@ -598,8 +598,8 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
|
|||
return Ok(database);
|
||||
}
|
||||
|
||||
// Track latest (offset, uuid, version, deleted, node_type, timestamp) per key
|
||||
let mut latest: HashMap<String, (u64, [u8; 16], u32, bool, u8, i64)> = HashMap::new();
|
||||
// Track latest (offset, uuid, version, deleted, node_type, timestamp, provenance) per key
|
||||
let mut latest: HashMap<String, (u64, [u8; 16], u32, bool, u8, i64, String)> = HashMap::new();
|
||||
|
||||
let file = fs::File::open(capnp_path)
|
||||
.with_context(|| format!("open {}", capnp_path.display()))?;
|
||||
|
|
@@ -634,6 +634,10 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
|
|||
.map(|t| t as u8)
|
||||
.unwrap_or(0);
|
||||
let timestamp = node_reader.get_timestamp();
|
||||
let provenance = node_reader.get_provenance().ok()
|
||||
.and_then(|t| t.to_str().ok())
|
||||
.unwrap_or("manual")
|
||||
.to_string();
|
||||
|
||||
let mut uuid = [0u8; 16];
|
||||
if let Ok(data) = node_reader.get_uuid() {
|
||||
|
|
@@ -644,10 +648,10 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
|
|||
|
||||
// Keep if newer version
|
||||
let dominated = latest.get(&key)
|
||||
.map(|(_, _, v, _, _, _)| version >= *v)
|
||||
.map(|(_, _, v, _, _, _, _)| version >= *v)
|
||||
.unwrap_or(true);
|
||||
if dominated {
|
||||
latest.insert(key, (offset, uuid, version, deleted, node_type, timestamp));
|
||||
latest.insert(key, (offset, uuid, version, deleted, node_type, timestamp, provenance));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -659,8 +663,9 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
|
|||
let mut nodes_table = txn.open_table(index::NODES)?;
|
||||
let mut key_uuid_table = txn.open_table(index::KEY_TO_UUID)?;
|
||||
let mut uuid_offsets = txn.open_multimap_table(index::UUID_OFFSETS)?;
|
||||
let mut by_provenance = txn.open_multimap_table(index::NODES_BY_PROVENANCE)?;
|
||||
|
||||
for (key, (offset, uuid, _, deleted, node_type, timestamp)) in latest {
|
||||
for (key, (offset, uuid, _, deleted, node_type, timestamp, provenance)) in latest {
|
||||
if !deleted {
|
||||
nodes_table.insert(key.as_str(), offset)?;
|
||||
// Pack: [uuid:16][node_type:1][timestamp:8] = 25 bytes
|
||||
|
|
@@ -669,6 +674,12 @@ fn rebuild_index(db_path: &Path, capnp_path: &Path) -> Result<redb::Database> {
|
|||
packed[16] = node_type;
|
||||
packed[17..25].copy_from_slice(&timestamp.to_be_bytes());
|
||||
key_uuid_table.insert(key.as_str(), packed.as_slice())?;
|
||||
// Pack: [negated_timestamp:8][key] for descending sort
|
||||
let neg_ts = (!timestamp).to_be_bytes();
|
||||
let mut prov_val = Vec::with_capacity(8 + key.len());
|
||||
prov_val.extend_from_slice(&neg_ts);
|
||||
prov_val.extend_from_slice(key.as_bytes());
|
||||
by_provenance.insert(provenance.as_str(), prov_val.as_slice())?;
|
||||
}
|
||||
// Always record offset in UUID history (even for deleted)
|
||||
uuid_offsets.insert(uuid.as_slice(), offset)?;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue