author     Kent Overstreet <kent.overstreet@gmail.com>    2021-04-26 19:40:09 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2021-04-26 20:18:47 -0400
commit     a14d39d7ac45eebe45ed0772d1ed837f15b15af4 (patch)
tree       e05a03cfe138fde9f0bceae21f244aaf00c5d9f5
parent     edc3ffe8f25506e60218aa6eb5cbcd3a019f6210 (diff)
bit_spinlocks now use futexes
Spinlocks aren't a good idea in userspace, where we can't actually disable preemption.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--  include/linux/bit_spinlock.h  44
-rw-r--r--  include/linux/rhashtable.h     1
2 files changed, 24 insertions, 21 deletions
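The pattern the patch switches to can be sketched outside the shim: take the bit with an atomic fetch-or (acquire); if it was already set, sleep in FUTEX_WAIT until the lock word changes; release by clearing the bit (release) and waking all waiters. In the sketch below, the futex() wrapper and the bit_lock()/bit_unlock() names are illustrative assumptions only; the patch itself relies on an equivalent futex() helper from its own <linux/futex.h> shim, as seen in the diff.

/* Sketch only: a futex-backed bit lock mirroring the new bit_spin_lock()/
 * bit_spin_unlock() in the diff below.  futex() here is a plain wrapper
 * around the raw syscall. */
#define _GNU_SOURCE
#include <linux/futex.h>   /* FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG */
#include <sys/syscall.h>   /* SYS_futex */
#include <unistd.h>        /* syscall() */
#include <stdint.h>
#include <limits.h>
#include <time.h>          /* struct timespec */

static long futex(uint32_t *uaddr, int op, uint32_t val,
		  const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}

static void bit_lock(int nr, unsigned long *_addr)
{
	/* Futexes operate on 32-bit words, so index the bitmap as u32s. */
	uint32_t *addr = (uint32_t *) _addr + (nr / 32);
	uint32_t mask = 1U << (nr & 31), v;

	while (1) {
		/* Try to set the lock bit; acquire ordering on success. */
		v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE);
		if (!(v & mask))
			break;
		/* Contended: sleep until the lock word changes from v.
		 * If it already changed, FUTEX_WAIT returns at once and we retry. */
		futex(addr, FUTEX_WAIT|FUTEX_PRIVATE_FLAG, v, NULL, NULL, 0);
	}
}

static void bit_unlock(int nr, unsigned long *_addr)
{
	uint32_t *addr = (uint32_t *) _addr + (nr / 32);
	uint32_t mask = 1U << (nr & 31);

	/* Clear the bit with release ordering, then wake every waiter. */
	__atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE);
	futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
}

Compared with the old test_and_set_bit_lock()/cpu_relax() spin loop, a contended locker now blocks in the kernel instead of burning CPU while the lock holder is scheduled out.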
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 0e88820a..ed47cc63 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -3,38 +3,40 @@
#include <linux/kernel.h>
#include <linux/preempt.h>
-#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/futex.h>
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static inline void bit_spin_lock(int nr, unsigned long *_addr)
{
- while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
- do {
- cpu_relax();
- } while (test_bit(bitnum, addr));
- }
-}
+ u32 mask, *addr = ((u32 *) _addr) + (nr / 32), v;
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
-{
- return !test_and_set_bit_lock(bitnum, addr);
-}
+ nr &= 31;
+ mask = 1U << nr;
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
-{
- BUG_ON(!test_bit(bitnum, addr));
+ while (1) {
+ v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE);
+ if (!(v & mask))
+ break;
- clear_bit_unlock(bitnum, addr);
+ futex(addr, FUTEX_WAIT|FUTEX_PRIVATE_FLAG, v, NULL, NULL, 0);
+ }
}
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static inline void bit_spin_wake(int nr, unsigned long *_addr)
{
- bit_spin_unlock(bitnum, addr);
+ u32 *addr = ((u32 *) _addr) + (nr / 32);
+
+ futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
}
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+static inline void bit_spin_unlock(int nr, unsigned long *_addr)
{
- return test_bit(bitnum, addr);
+ u32 mask, *addr = ((u32 *) _addr) + (nr / 32);
+
+ nr &= 31;
+ mask = 1U << nr;
+
+ __atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE);
+ futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
}
#endif /* __LINUX_BIT_SPINLOCK_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 6cf8c257..c5e717bf 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -395,6 +395,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
+ bit_spin_wake(0, (unsigned long *) bkt);
}
/**
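The rhashtable hunk is the other half of the change: rht_assign_unlock() never calls bit_spin_unlock(), it drops the bucket's bit lock implicitly, because the pointer it publishes with rcu_assign_pointer() has the lock bit (bit 0) clear. Spinning waiters would notice that store on their own, but futex waiters are asleep in the kernel and need an explicit FUTEX_WAKE, which is what the added bit_spin_wake() call supplies. A rough sketch of that wake-after-implicit-unlock pattern follows, with a hypothetical assign_and_unlock() and the same raw-syscall futex() wrapper as above; neither is part of the patch.

/* Sketch only: release a bit lock by overwriting the whole lock word with an
 * aligned pointer (bit 0 clear), then wake futex waiters explicitly -- which
 * is what rht_assign_unlock() plus bit_spin_wake(0, bkt) amounts to. */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <limits.h>
#include <time.h>

static long futex(uint32_t *uaddr, int op, uint32_t val,
		  const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}

static void assign_and_unlock(unsigned long *bkt, void *obj)
{
	/* Publishing the aligned pointer clears the lock bit as a side effect. */
	__atomic_store_n(bkt, (unsigned long) obj, __ATOMIC_RELEASE);

	/* Waiters blocked in FUTEX_WAIT won't notice the store by themselves;
	 * wake them on the 32-bit word that bit_spin_lock() waited on. */
	futex((uint32_t *) bkt, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
}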