Diffstat (limited to 'libbcachefs/buckets.h')
-rw-r--r--  libbcachefs/buckets.h | 35 +++++++++++++++++++++++++++++++----
1 file changed, 31 insertions, 4 deletions
diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h
index f192809f..ecbeb728 100644
--- a/libbcachefs/buckets.h
+++ b/libbcachefs/buckets.h
@@ -40,15 +40,42 @@ static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, secto
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
_b < (_buckets)->b + (_buckets)->nbuckets; _b++)
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in-memory state for every single bucket on every device.
+ *
+ * We used to do
+ *	while (xchg(&b->lock, 1))
+ *		cpu_relax();
+ *
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR 0
+#else
+#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+ ulong ulong;
+ u8 byte;
+};
+
static inline void bucket_unlock(struct bucket *b)
{
- smp_store_release(&b->lock, 0);
+ BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+ bit_spin_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
}
static inline void bucket_lock(struct bucket *b)
{
- while (xchg(&b->lock, 1))
- cpu_relax();
+ bit_spin_lock(BUCKET_LOCK_BITNR, (void *) &b->lock);
}
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
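
The endianness dance above exists because bit_spin_lock() operates on a full unsigned long while struct bucket only has a single byte to spare, so the lock bit has to land in the byte that b->lock actually occupies. A minimal userspace sketch of that invariant (hypothetical demo, not part of the patch; assumes GCC/Clang's predefined __BYTE_ORDER__ macros):

/*
 * Hypothetical userspace demo (not part of the patch): verify that the
 * bit selected by BUCKET_LOCK_BITNR falls in the lowest-addressed byte
 * of an unsigned long - the invariant bucket_lock()/bucket_unlock()
 * rely on when handing a one-byte field to bit_spin_lock().
 */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR 0
#else
#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	unsigned long ulong;
	unsigned char byte;	/* aliases the lowest-addressed byte */
};

int main(void)
{
	union ulong_byte_assert u = { .ulong = 1UL << BUCKET_LOCK_BITNR };

	/* little-endian: bit 0 is in byte 0; big-endian: the MSB is */
	printf("lock bit lands in first byte: %s\n", u.byte ? "yes" : "no");
	return 0;
}

The BUILD_BUG_ON() in bucket_unlock() performs the same check at compile time: if the selected bit did not alias the first byte, .byte would evaluate to zero and the build would fail.
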
@@ -180,7 +207,7 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_waterma
switch (watermark) {
case BCH_WATERMARK_NR:
- unreachable();
+ BUG();
case BCH_WATERMARK_stripe:
reserved += ca->mi.nbuckets >> 6;
fallthrough;
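
Two things are going on in this hunk. Replacing unreachable() with BUG() turns an impossible watermark value from undefined behavior into a deterministic crash, a safer failure mode for state the allocator depends on. The surrounding switch accumulates reserves via fallthrough: each case adds its own share (the stripe watermark sets aside nbuckets >> 6, i.e. 1/64 or about 1.6% of the device's buckets) and then falls into the cases below it, so earlier cases end up with the sum of every reserve added after them. A minimal sketch of the pattern (the enum values and non-stripe amounts are hypothetical; only nbuckets >> 6 appears in the hunk):

/*
 * A minimal sketch of the fallthrough-accumulation pattern used by
 * bch2_dev_buckets_reserved(). Enum values and the non-stripe amounts
 * are hypothetical; only the stripe reserve of nbuckets >> 6 appears
 * in the hunk above.
 */
#include <stdint.h>
#include <stdlib.h>

enum watermark {
	WM_stripe,	/* must leave the largest cumulative reserve */
	WM_normal,
	WM_copygc,
	WM_btree,	/* smallest reserve requirement */
	WM_NR,
};

static uint64_t buckets_reserved(uint64_t nbuckets, enum watermark w)
{
	uint64_t reserved = 0;

	switch (w) {
	case WM_NR:
		abort();			/* userspace stand-in for BUG() */
	case WM_stripe:
		reserved += nbuckets >> 6;	/* 1/64, from the hunk */
		/* fallthrough */
	case WM_normal:
		reserved += nbuckets >> 7;	/* hypothetical amount */
		/* fallthrough */
	case WM_copygc:
		reserved += nbuckets >> 8;	/* hypothetical amount */
		/* fallthrough */
	case WM_btree:
		break;
	}
	return reserved;
}

Ordering the cases this way lets one switch express cumulative reserves without duplicating any arithmetic: a single fallthrough chain replaces per-watermark sums.
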