store: remove StoreLock and refresh_nodes

With singleton Store (one daemon, RPC for clients), there are no concurrent
writers to the capnp log. The file-based flock and incremental refresh logic
were for multi-process coordination we no longer need.

-110 lines of dead concurrency code.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-13 19:13:25 -04:00
parent f413a853d8
commit e48ca2ecad
3 changed files with 9 additions and 110 deletions

View file

@ -16,16 +16,12 @@ pub fn current_provenance() -> String {
impl Store {
/// Add or update a node (appends to log + updates index).
/// Holds StoreLock across refresh + check + write to prevent duplicate UUIDs.
pub fn upsert_node(&mut self, mut node: Node) -> Result<()> {
let _lock = StoreLock::acquire()?;
self.refresh_nodes()?;
if let Some(existing) = self.nodes.get(&node.key) {
node.uuid = existing.uuid;
node.version = existing.version + 1;
}
let offset = self.append_nodes_unlocked(&[node.clone()])?;
let offset = self.append_nodes(&[node.clone()])?;
if let Some(ref database) = self.db {
index::index_node(database, &node.key, offset, &node.uuid)?;
}
@ -63,11 +59,7 @@ impl Store {
}
/// Upsert with explicit provenance (for agent-created nodes).
/// Holds StoreLock across refresh + check + write to prevent duplicate UUIDs.
pub fn upsert_provenance(&mut self, key: &str, content: &str, provenance: &str) -> Result<&'static str> {
let _lock = StoreLock::acquire()?;
self.refresh_nodes()?;
if let Some(existing) = self.nodes.get(key) {
if existing.content == content {
return Ok("unchanged");
@ -77,7 +69,7 @@ impl Store {
node.provenance = provenance.to_string();
node.timestamp = now_epoch();
node.version += 1;
let offset = self.append_nodes_unlocked(std::slice::from_ref(&node))?;
let offset = self.append_nodes(std::slice::from_ref(&node))?;
if let Some(ref database) = self.db {
index::index_node(database, &node.key, offset, &node.uuid)?;
}
@ -86,7 +78,7 @@ impl Store {
} else {
let mut node = new_node(key, content);
node.provenance = provenance.to_string();
let offset = self.append_nodes_unlocked(std::slice::from_ref(&node))?;
let offset = self.append_nodes(std::slice::from_ref(&node))?;
if let Some(ref database) = self.db {
index::index_node(database, &node.key, offset, &node.uuid)?;
}
@ -96,12 +88,8 @@ impl Store {
}
}
/// Soft-delete a node (appends deleted version, removes from cache + redb).
/// Holds StoreLock across refresh + write to see concurrent creates.
/// Soft-delete a node (appends deleted version, removes from index).
pub fn delete_node(&mut self, key: &str) -> Result<()> {
let _lock = StoreLock::acquire()?;
self.refresh_nodes()?;
let prov = current_provenance();
let node = self.nodes.get(key)
@ -112,7 +100,7 @@ impl Store {
deleted.version += 1;
deleted.provenance = prov;
deleted.timestamp = now_epoch();
self.append_nodes_unlocked(std::slice::from_ref(&deleted))?;
self.append_nodes(std::slice::from_ref(&deleted))?;
if let Some(ref database) = self.db {
index::remove_node(database, key, &uuid)?;
}
@ -125,17 +113,10 @@ impl Store {
/// Graph edges (source/target UUIDs) are unaffected — they're already
/// UUID-based. We update the human-readable source_key/target_key strings
/// on relations, and created_at is preserved untouched.
///
/// Appends: (new_key, v+1) + (old_key, deleted, v+1) + updated relations.
/// Holds StoreLock across refresh + write to prevent races.
pub fn rename_node(&mut self, old_key: &str, new_key: &str) -> Result<()> {
if old_key == new_key {
return Ok(());
}
let _lock = StoreLock::acquire()?;
self.refresh_nodes()?;
if self.nodes.contains_key(new_key) {
bail!("Key '{}' already exists", new_key);
}
@ -172,9 +153,9 @@ impl Store {
.collect();
// Persist: append renamed node + tombstone, then updated relations
let offset = self.append_nodes_unlocked(&[renamed.clone(), tombstone.clone()])?;
let offset = self.append_nodes(&[renamed.clone(), tombstone.clone()])?;
if !updated_rels.is_empty() {
self.append_relations_unlocked(&updated_rels)?;
self.append_relations(&updated_rels)?;
}
// Update index: remove old key, add renamed