From f5baaf48e3e82b1caf9f5cd1207d4d6feba3a2e5 Mon Sep 17 00:00:00 2001
From: Thomas Bertschinger
Date: Mon, 15 Jan 2024 23:41:02 -0700
Subject: move Rust sources to top level, C sources into c_src

This moves the Rust sources out of rust_src/ and into the top level.
Running the bcachefs executable out of the development tree is now:

$ ./target/release/bcachefs command

or

$ cargo run --profile release -- command

instead of "./bcachefs command".

Building and installing is still:

$ make && make install

Signed-off-by: Thomas Bertschinger
Signed-off-by: Kent Overstreet
---
 c_src/include/linux/bitops.h | 286 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 286 insertions(+)
 create mode 100644 c_src/include/linux/bitops.h

diff --git a/c_src/include/linux/bitops.h b/c_src/include/linux/bitops.h
new file mode 100644
index 00000000..758476b1
--- /dev/null
+++ b/c_src/include/linux/bitops.h
@@ -0,0 +1,286 @@
+#ifndef _TOOLS_LINUX_BITOPS_H_
+#define _TOOLS_LINUX_BITOPS_H_
+
+#include <limits.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+#ifndef BITS_PER_LONG
+# define BITS_PER_LONG __WORDSIZE
+#endif
+
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
+#define BITS_PER_BYTE		8
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
+static inline void set_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	__atomic_or_fetch(p, mask, __ATOMIC_RELAXED);
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
+}
+
+static inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	__atomic_and_fetch(p, ~mask, __ATOMIC_RELAXED);
+}
+
+static inline int test_bit(long nr, const volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *) addr) + BIT_WORD(nr);
+
+	return (*p & mask) != 0;
+}
+
+static inline int __test_and_set_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+
+	old = *p;
+	*p = old | mask;
+
+	return (old & mask) != 0;
+}
+
+static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *) addr) + BIT_WORD(nr);
+	unsigned long old;
+
+	old = __atomic_fetch_or(p, mask, __ATOMIC_RELAXED);
+
+	return (old & mask) != 0;
+}
+
+static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *) addr) + BIT_WORD(nr);
+	unsigned long old;
+
+	old = __atomic_fetch_and(p, ~mask, __ATOMIC_RELAXED);
+
+	return (old & mask) != 0;
+}
+
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	__atomic_and_fetch(p, ~mask, __ATOMIC_RELEASE);
+}
+
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *) addr) + BIT_WORD(nr);
+	unsigned long old;
+
+	old = __atomic_fetch_or(p, mask, __ATOMIC_ACQUIRE);
+
+	return (old & mask) != 0;
+}
+
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+	return __builtin_popcountl(w);
+}
+
+static inline unsigned long hweight64(u64 w)
+{
+	return __builtin_popcount((u32) w) +
+	       __builtin_popcount(w >> 32);
+}
+
+static inline unsigned long hweight32(u32 w)
+{
+	return __builtin_popcount(w);
+}
+
+static inline unsigned long hweight8(unsigned long w)
+{
+	return __builtin_popcountl(w);
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (64 - shift));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (32 - shift));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 rol16(__u16 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (16 - shift));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 ror16(__u16 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (16 - shift));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 rol8(__u8 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (8 - shift));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 ror8(__u8 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (8 - shift));
+}
+
+static inline unsigned long __fls(unsigned long word)
+{
+	return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
+}
+
+static inline int fls(int x)
+{
+	return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+static inline int fls64(__u64 x)
+{
+#if BITS_PER_LONG == 32
+	__u32 h = x >> 32;
+	if (h)
+		return fls(h) + 32;
+	return fls(x);
+#elif BITS_PER_LONG == 64
+	if (x == 0)
+		return 0;
+	return __fls(x) + 1;
+#endif
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+	if (sizeof(l) == 4)
+		return fls(l);
+	return fls64(l);
+}
+
+static inline unsigned long __ffs(unsigned long word)
+{
+	return __builtin_ctzl(word);
+}
+
+static inline unsigned long __ffs64(u64 word)
+{
+#if BITS_PER_LONG == 32
+	if (((u32)word) == 0UL)
+		return __ffs((u32)(word >> 32)) + 32;
+#elif BITS_PER_LONG != 64
+#error BITS_PER_LONG not 32 or 64
+#endif
+	return __ffs((unsigned long)word);
+}
+
+#define ffz(x) __ffs(~(x))
+
+static inline __attribute__((const))
+unsigned long rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
+#endif
-- 
cgit v1.2.3
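
A note for readers new to these kernel-style shims: the atomic variants
(set_bit(), clear_bit(), test_and_set_bit(), ...) are built on the GCC/Clang
__atomic builtins with relaxed ordering, the _lock/_unlock variants add
acquire/release ordering, and the double-underscore versions are the cheaper
non-atomic forms. The sketch below is illustrative only and not part of the
patch: it assumes c_src/include/ is on the compiler's include path so the
header resolves as <linux/bitops.h>, and it sticks to helpers defined in this
file (find_first_bit()/find_next_bit(), which for_each_set_bit() relies on,
live in a separate header).

	#include <stdio.h>
	#include <linux/bitops.h>

	int main(void)
	{
		/* two unsigned longs on LP64: enough for 128 bits */
		unsigned long bitmap[BITS_TO_LONGS(128)] = { 0 };

		set_bit(3, bitmap);		/* atomic, relaxed */
		__set_bit(64, bitmap);		/* non-atomic variant */

		if (test_and_set_bit_lock(3, bitmap))	/* acquire; bit already set */
			printf("bit 3 was already taken\n");

		printf("weight = %lu\n",
		       hweight_long(bitmap[0]) + hweight_long(bitmap[1])); /* 2 */
		printf("lowest = %lu\n", __ffs(bitmap[0]));		   /* 3 */
		printf("rol32  = %u\n", rol32(1, 4));			   /* 16 */

		clear_bit_unlock(3, bitmap);	/* release */
		return 0;
	}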