diff options
author | Thomas Bertschinger <tahbertschinger@gmail.com> | 2024-01-15 23:41:02 -0700 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2024-01-16 01:47:05 -0500 |
commit | f5baaf48e3e82b1caf9f5cd1207d4d6feba3a2e5 (patch) | |
tree | 59f7b0e4667df7a9d3d5a45725f2aaab3e79b4c5 /c_src/include/linux/bit_spinlock.h | |
parent | fb35dbfdc5a9446fbb856dae5542b23963e28b89 (diff) |
move Rust sources to top level, C sources into c_src
This moves the Rust sources out of rust_src/ and into the top level.
Running the bcachefs executable out of the development tree is now:
$ ./target/release/bcachefs command
or
$ cargo run --profile release -- command
instead of "./bcachefs command".
Building and installing is still:
$ make && make install
Signed-off-by: Thomas Bertschinger <tahbertschinger@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'c_src/include/linux/bit_spinlock.h')
-rw-r--r-- | c_src/include/linux/bit_spinlock.h | 84 |
1 file changed, 84 insertions, 0 deletions
diff --git a/c_src/include/linux/bit_spinlock.h b/c_src/include/linux/bit_spinlock.h
new file mode 100644
index 00000000..873f08c2
--- /dev/null
+++ b/c_src/include/linux/bit_spinlock.h
@@ -0,0 +1,84 @@
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/futex.h>
#include <urcu/futex.h>

/*
 * Userspace implementation of the kernel's bit spinlocks: lock bit `nr`
 * within the bitmap at `addr`, blocking on a futex (rather than spinning)
 * when the bit is contended.
 *
 * The futex wait op wants an explicit 32-bit address and value. If the bitmap
 * used for the spinlock is 64-bit, cast down and pass the right 32-bit region
 * for the in-kernel checks. The value is the copy that has already been read
 * from the atomic op.
 *
 * The futex wake op interprets the value as the number of waiters to wake (up
 * to INT_MAX), so pass that along directly.
 */
static inline void do_futex(int nr, unsigned long *addr, unsigned long v, int futex_flags)
{
	u32 *addr32 = (u32 *) addr;
	u32 *v32 = (u32 *) &v;
	int shift = 0;

	futex_flags |= FUTEX_PRIVATE_FLAG;

#if BITS_PER_LONG == 64
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Little endian: bits 32..63 live in the higher-address 32-bit half. */
	shift = (nr >= 32) ? 1 : 0;
#else
	/* Big endian: bits 0..31 live in the higher-address 32-bit half. */
	shift = (nr < 32) ? 1 : 0;
#endif
#endif
	if (shift) {
		addr32 += shift;
		v32 += shift;
	}
	/*
	 * The shift to determine the futex address may have cast away a
	 * literal wake count value. The value is capped to INT_MAX and thus
	 * always in the low bytes of v regardless of bit nr. Copy in the wake
	 * count to whatever 32-bit range was selected.
	 */
	if (futex_flags == FUTEX_WAKE_PRIVATE)
		*v32 = (u32) v;
	futex(addr32, futex_flags, *v32, NULL, NULL, 0);
}

/*
 * Acquire bit `nr` of the bitmap at `_addr` as a spinlock, sleeping on the
 * corresponding futex while another holder has the bit set.
 */
static inline void bit_spin_lock(int nr, unsigned long *_addr)
{
	unsigned long mask;
	unsigned long *addr = _addr + (nr / BITS_PER_LONG);
	unsigned long v;

	nr &= BITS_PER_LONG - 1;
	mask = 1UL << nr;

	while (1) {
		/* Try to set the bit; acquire ordering on success. */
		v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE);
		if (!(v & mask))
			break;	/* bit was clear: lock acquired */

		/* Bit already held: sleep until unlock wakes us, then retry. */
		do_futex(nr, addr, v, FUTEX_WAIT);
	}
}

/*
 * Wake any futex waiters on bit `nr` without modifying the bit itself.
 *
 * NOTE(review): unlike bit_spin_unlock() below, `_addr` is not offset by
 * (nr / BITS_PER_LONG) and nr is not masked — presumably callers only pass
 * nr < BITS_PER_LONG here; confirm against callers.
 */
static inline void bit_spin_wake(int nr, unsigned long *_addr)
{
	do_futex(nr, _addr, INT_MAX, FUTEX_WAKE);
}

/*
 * Release bit `nr` of the bitmap at `_addr` (release ordering) and wake up
 * to INT_MAX waiters sleeping in bit_spin_lock().
 */
static inline void bit_spin_unlock(int nr, unsigned long *_addr)
{
	unsigned long mask;
	unsigned long *addr = _addr + (nr / BITS_PER_LONG);

	nr &= BITS_PER_LONG - 1;
	mask = 1UL << nr;

	__atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE);
	do_futex(nr, addr, INT_MAX, FUTEX_WAKE);
}

#endif /* __LINUX_BIT_SPINLOCK_H */