Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atomic.h        | 405
-rw-r--r--  include/linux/backing-dev.h   |   2
-rw-r--r--  include/linux/cache.h         |   3
-rw-r--r--  include/linux/dcache.h        |   2
-rw-r--r--  include/linux/kernel.h        |  16
-rw-r--r--  include/linux/page.h          |   7
6 files changed, 234 insertions, 201 deletions
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index ad33ad3b..7471bd97 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -4,11 +4,58 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#define xchg(p, v) \
- __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
+typedef struct {
+ int counter;
+} atomic_t;
+
+typedef struct {
+ long counter;
+} atomic_long_t;
+
+typedef struct {
+ u64 counter;
+} atomic64_t;
+
+#ifndef C11_ATOMICS
+
+#include <urcu/uatomic.h>
+
+#if (CAA_BITS_PER_LONG != 64)
+#define ATOMIC64_SPINLOCK
+#endif
+
+#define __ATOMIC_READ(p) uatomic_read(p)
+#define __ATOMIC_SET(p, v) uatomic_set(p, v)
+#define __ATOMIC_ADD_RETURN(v, p) uatomic_add_return(p, v)
+#define __ATOMIC_SUB_RETURN(v, p) uatomic_sub_return(p, v)
+#define __ATOMIC_ADD(v, p) uatomic_add(p, v)
+#define __ATOMIC_SUB(v, p) uatomic_sub(p, v)
+#define __ATOMIC_INC(p) uatomic_inc(p)
+#define __ATOMIC_DEC(p) uatomic_dec(p)
+
+#define xchg(p, v) uatomic_xchg(p, v)
+#define xchg_acquire(p, v) uatomic_xchg(p, v)
+#define cmpxchg(p, old, new) uatomic_cmpxchg(p, old, new)
+#define cmpxchg_acquire(p, old, new) uatomic_cmpxchg(p, old, new)
-#define xchg_acquire(p, v) \
- __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
+#define smp_mb__before_atomic() cmm_smp_mb__before_uatomic_add()
+#define smp_mb__after_atomic() cmm_smp_mb__after_uatomic_add()
+#define smp_wmb() cmm_smp_wmb()
+#define smp_rmb() cmm_smp_rmb()
+#define smp_mb() cmm_smp_mb()
+#define smp_read_barrier_depends() cmm_smp_read_barrier_depends()
+
+#else /* C11_ATOMICS */
+
+#define __ATOMIC_READ(p) __atomic_load_n(p, __ATOMIC_RELAXED)
+#define __ATOMIC_SET(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN(v, p) __atomic_add_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN_RELEASE(v, p) \
+ __atomic_add_fetch(p, v, __ATOMIC_RELEASE)
+#define __ATOMIC_SUB_RETURN(v, p) __atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+
+#define xchg(p, v) __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
+#define xchg_acquire(p, v) __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
#define cmpxchg(p, old, new) \
({ \
@@ -37,6 +84,8 @@
#define smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_read_barrier_depends()
+#endif
+
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define smp_load_acquire(p) \
@@ -52,199 +101,179 @@ do { \
WRITE_ONCE(*p, v); \
} while (0)
-typedef struct {
- int counter;
-} atomic_t;
-
-static inline int atomic_read(const atomic_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
+/* atomic interface: */
+
+#ifndef __ATOMIC_ADD
+#define __ATOMIC_ADD(i, v) __ATOMIC_ADD_RETURN(i, v)
+#endif
+
+#ifndef __ATOMIC_ADD_RETURN_RELEASE
+#define __ATOMIC_ADD_RETURN_RELEASE(i, v) \
+ ({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
+#endif
+
+#ifndef __ATOMIC_SUB
+#define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
+#endif
+
+#ifndef __ATOMIC_INC_RETURN
+#define __ATOMIC_INC_RETURN(v) __ATOMIC_ADD_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC_RETURN
+#define __ATOMIC_DEC_RETURN(v) __ATOMIC_SUB_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_INC
+#define __ATOMIC_INC(v) __ATOMIC_ADD(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC
+#define __ATOMIC_DEC(v) __ATOMIC_SUB(1, v)
+#endif
+
+#define DEF_ATOMIC_OPS(a_type, i_type) \
+static inline i_type a_type##_read(const a_type##_t *v) \
+{ \
+ return __ATOMIC_READ(&v->counter); \
+} \
+ \
+static inline void a_type##_set(a_type##_t *v, i_type i) \
+{ \
+ return __ATOMIC_SET(&v->counter, i); \
+} \
+ \
+static inline i_type a_type##_add_return(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_ADD_RETURN(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_add_return_release(i_type i, a_type##_t *v)\
+{ \
+ return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_sub_return(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_SUB_RETURN(i, &v->counter); \
+} \
+ \
+static inline void a_type##_add(i_type i, a_type##_t *v) \
+{ \
+ __ATOMIC_ADD(i, &v->counter); \
+} \
+ \
+static inline void a_type##_sub(i_type i, a_type##_t *v) \
+{ \
+ __ATOMIC_SUB(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_inc_return(a_type##_t *v) \
+{ \
+ return __ATOMIC_INC_RETURN(&v->counter); \
+} \
+ \
+static inline i_type a_type##_dec_return(a_type##_t *v) \
+{ \
+ return __ATOMIC_DEC_RETURN(&v->counter); \
+} \
+ \
+static inline void a_type##_inc(a_type##_t *v) \
+{ \
+ __ATOMIC_INC(&v->counter); \
+} \
+ \
+static inline void a_type##_dec(a_type##_t *v) \
+{ \
+ __ATOMIC_DEC(&v->counter); \
+} \
+ \
+static inline bool a_type##_add_negative(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_ADD_RETURN(i, &v->counter) < 0; \
+} \
+ \
+static inline bool a_type##_sub_and_test(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_SUB_RETURN(i, &v->counter) == 0; \
+} \
+ \
+static inline bool a_type##_inc_and_test(a_type##_t *v) \
+{ \
+ return __ATOMIC_INC_RETURN(&v->counter) == 0; \
+} \
+ \
+static inline bool a_type##_dec_and_test(a_type##_t *v) \
+{ \
+ return __ATOMIC_DEC_RETURN(&v->counter) == 0; \
+} \
+ \
+static inline i_type a_type##_add_unless(a_type##_t *v, i_type a, i_type u)\
+{ \
+ i_type old, c = __ATOMIC_READ(&v->counter); \
+ while (c != u && (old = cmpxchg(&v->counter, c, c + a)) != c) \
+ c = old; \
+ return c; \
+} \
+ \
+static inline bool a_type##_inc_not_zero(a_type##_t *v) \
+{ \
+ return a_type##_add_unless(v, 1, 0); \
+} \
+ \
+static inline i_type a_type##_xchg(a_type##_t *v, i_type i) \
+{ \
+ return xchg(&v->counter, i); \
+} \
+ \
+static inline i_type a_type##_cmpxchg(a_type##_t *v, i_type old, i_type new)\
+{ \
+ return cmpxchg(&v->counter, old, new); \
+} \
+ \
+static inline i_type a_type##_cmpxchg_acquire(a_type##_t *v, i_type old, i_type new)\
+{ \
+ return cmpxchg_acquire(&v->counter, old, new); \
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
+DEF_ATOMIC_OPS(atomic, int)
+DEF_ATOMIC_OPS(atomic_long, long)
+
+#ifndef ATOMIC64_SPINLOCK
+DEF_ATOMIC_OPS(atomic64, s64)
+#else
+s64 atomic64_read(const atomic64_t *v);
+void atomic64_set(atomic64_t *v, s64);
+
+s64 atomic64_add_return(s64, atomic64_t *);
+s64 atomic64_sub_return(s64, atomic64_t *);
+void atomic64_add(s64, atomic64_t *);
+void atomic64_sub(s64, atomic64_t *);
+
+s64 atomic64_xchg(atomic64_t *, s64);
+s64 atomic64_cmpxchg(atomic64_t *, s64, s64);
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-typedef struct {
- long counter;
-} atomic_long_t;
-
-static inline long atomic_long_read(const atomic_long_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_set(atomic_long_t *v, long i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_long_add_return(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_long_sub_return(i, v);
-}
-
-static inline void atomic_long_inc(atomic_long_t *v)
-{
- atomic_long_add(1, v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *v)
-{
- atomic_long_sub(1, v);
-}
-
-static inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return cmpxchg(&v->counter, old, new);
-}
-
-static inline bool atomic_long_inc_not_zero(atomic_long_t *i)
-{
- long old, v = atomic_long_read(i);
-
- do {
- if (!(old = v))
- return false;
- } while ((v = atomic_long_cmpxchg(i, old, old + 1)) != old);
-
- return true;
-}
-
-#define atomic_long_sub_and_test(i, v) (atomic_long_sub_return((i), (v)) == 0)
-
-typedef struct {
- u64 counter;
-} atomic64_t;
-
-static inline s64 atomic64_read(const atomic64_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_set(atomic64_t *v, s64 i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_add(s64 i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(s64 i, atomic64_t *v)
-{
- atomic64_sub_return(i, v);
-}
-
-static inline void atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-
-static inline void atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
{
- return cmpxchg(&v->counter, old, new);
+ smp_mb__before_atomic();
+ return atomic64_add_return(i, v);
}
static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- return cmpxchg_acquire(&v->counter, old, new);
+ return atomic64_cmpxchg(v, old, new);
}
-static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELEASE);
-}
+#endif
#endif /* __TOOLS_LINUX_ATOMIC_H */
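Usage sketch, not part of the patch: a toy refcount built on the helpers that DEF_ATOMIC_OPS(atomic, int) generates (atomic_set, atomic_inc, atomic_read, atomic_dec_and_test), assuming the patched include/linux/atomic.h is on the include path.

#include <linux/atomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
	atomic_t count;
};

static void ref_get(struct ref *r)
{
	atomic_inc(&r->count);
}

static bool ref_put(struct ref *r)
{
	/* true once the last reference is dropped */
	return atomic_dec_and_test(&r->count);
}

int main(void)
{
	struct ref r;

	atomic_set(&r.count, 1);
	ref_get(&r);
	printf("refs: %d\n", atomic_read(&r.count));	/* prints 2 */
	ref_put(&r);
	if (ref_put(&r))
		printf("last ref dropped\n");
	return 0;
}
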
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 01b2c153..d8a86b45 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H
+#include <linux/list.h>
+
typedef int (congested_fn)(void *, int);
enum wb_congested_state {
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4ee609ae..c61167ca 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -1,7 +1,8 @@
#ifndef __TOOLS_LINUX_CACHE_H
#define __TOOLS_LINUX_CACHE_H
-#define L1_CACHE_BYTES 64
+#define L1_CACHE_SHIFT 6
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
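As a quick sanity check (not part of the patch) of what the reworked constants expand to; __ALIGN_KERNEL is restated here in the usual round-up form purely as a stand-in for the real definition.

#include <stdio.h>

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
/* Stand-in for __ALIGN_KERNEL(): round x up to a multiple of a. */
#define __ALIGN_KERNEL(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define L1_CACHE_ALIGN(x)	__ALIGN_KERNEL(x, L1_CACHE_BYTES)

int main(void)
{
	printf("%d\n", L1_CACHE_BYTES);			/* 64  */
	printf("%lu\n", L1_CACHE_ALIGN(100UL));		/* 128 */
	printf("%lu\n", L1_CACHE_ALIGN(64UL));		/* 64  */
	return 0;
}
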
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b569b2c1..15b803ea 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -5,7 +5,7 @@ struct super_block;
struct inode;
/* The hash is always the low bits of hash_len */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HASH_LEN_DECLARE u32 hash; u32 len
#else
#define HASH_LEN_DECLARE u32 len; u32 hash
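For context, not part of the patch: the field order flips with byte order so that the name hash always lands in the low 32 bits of the combined hash_len word, which is what the comment above the macro promises. A minimal sketch using a simplified, hypothetical qstr:

#include <stdint.h>
#include <stdio.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HASH_LEN_DECLARE uint32_t hash; uint32_t len
#else
#define HASH_LEN_DECLARE uint32_t len; uint32_t hash
#endif

/* Simplified stand-in for the kernel's struct qstr. */
struct qstr {
	union {
		struct { HASH_LEN_DECLARE; };
		uint64_t hash_len;
	};
	const char *name;
};

int main(void)
{
	struct qstr q = { .hash = 0x1234, .len = 5, .name = "hello" };

	/* On either byte order, the hash ends up in the low 32 bits. */
	printf("hash_len low bits: 0x%llx\n",
	       (unsigned long long)(q.hash_len & 0xffffffffULL));
	return 0;
}
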
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index dc778f1c..b6afea43 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -111,17 +111,11 @@
#define cpu_relax() do {} while (0)
#define cpu_relax_lowlatency() do {} while (0)
-__printf(1, 2)
-static inline void panic(const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vprintf(fmt, args);
- va_end(args);
-
- BUG();
-}
+#define panic(fmt, ...) \
+do { \
+ printf(fmt, ##__VA_ARGS__); \
+ BUG(); \
+} while (0)
unsigned long simple_strtoul(const char *,char **,unsigned int);
long simple_strtol(const char *,char **,unsigned int);
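A side note on the new macro form (not part of the patch): unlike the removed inline function, it relies on the GNU ##__VA_ARGS__ extension so panic() still compiles when called with a bare format string. A minimal sketch with BUG() stubbed out as abort():

#include <stdio.h>
#include <stdlib.h>

#define BUG()	abort()		/* stand-in for the header's BUG() */

#define panic(fmt, ...)				\
do {						\
	printf(fmt, ##__VA_ARGS__);		\
	BUG();					\
} while (0)

int main(void)
{
	if (0)
		panic("plain message, no arguments\n");	/* ## swallows the comma */
	if (0)
		panic("bad value: %d\n", 42);
	printf("still running\n");
	return 0;
}
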
diff --git a/include/linux/page.h b/include/linux/page.h
index 8d6413ce..87be064f 100644
--- a/include/linux/page.h
+++ b/include/linux/page.h
@@ -5,6 +5,13 @@
struct page;
+#ifndef PAGE_SIZE
+
+#define PAGE_SIZE 4096UL
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#endif
+
#define virt_to_page(p) \
((struct page *) (((unsigned long) (p)) & PAGE_MASK))
#define offset_in_page(p) ((unsigned long) (p) & ~PAGE_MASK)
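For illustration (not part of the patch), how the fallback PAGE_SIZE/PAGE_MASK pair splits an address the way virt_to_page() and offset_in_page() do; the struct page cast is left out and an arbitrary address is used.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0x12345678UL;	/* arbitrary address for illustration */

	printf("page base: 0x%lx\n", p & PAGE_MASK);	/* 0x12345000 */
	printf("offset:    0x%lx\n", p & ~PAGE_MASK);	/* 0x678      */
	return 0;
}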