-rw-r--r--  arch/alpha/include/asm/local.h   | 17 -
-rw-r--r--  arch/m32r/include/asm/local.h    | 25 -
-rw-r--r--  arch/mips/include/asm/local.h    | 25 -
-rw-r--r--  arch/powerpc/include/asm/local.h | 25 -
-rw-r--r--  arch/x86/include/asm/local.h     | 37 -
-rw-r--r--  include/asm-generic/local.h      | 19 -
6 files changed, 0 insertions(+), 148 deletions(-)
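
This commit is a pure removal: the cpu_local_*() and __cpu_local_*() accessors for per-cpu local_t variables are dropped from every architecture that still defined them, along with the asm-generic fallback. Zero insertions implies no callers remain; historically this role passed to the this_cpu_*() operations. As a reminder of the retired API, a minimal sketch (the per-cpu counter name "hits" is hypothetical, and this will not build against kernels that carry this patch):

	/* Sketch of the API being removed.  "hits" is an illustrative
	 * per-cpu local_t counter, not a real kernel symbol.
	 */
	#include <linux/percpu.h>
	#include <asm/local.h>

	static DEFINE_PER_CPU(local_t, hits);

	static void count_hit(void)
	{
		/* The macros take the variable itself, not its address;
		 * on the generic version this expands to
		 * local_inc(&__get_cpu_var(hits)).
		 */
		cpu_local_inc(hits);
	}
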
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea696421..b9e3e3318371 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ALPHA_LOCAL_H */
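
The Alpha version, like the asm-generic one at the end of this patch, layered the accessors directly on __get_cpu_var() with no preemption protection. Spelled out as a sketch, with "v" a hypothetical per-cpu local_t:

	cpu_local_inc(v);
	/* ...expanded to... */
	local_inc(&__get_cpu_var(v));
	/* Two steps: resolve the address of this CPU's copy of v, then
	 * increment through it.  A migration between the two steps makes
	 * the increment land on the previous CPU's counter, which is why
	 * the m32r/mips/powerpc/x86 variants below bracket the same
	 * expansion with preempt_disable()/preempt_enable().
	 */
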
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d138630..734bca87018a 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
* a variable, not an address.
*/
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
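
The m32r wrappers deleted here (mips, powerpc and x86 carry the same pair below) use GCC statement expressions so that a value-returning operation can be bracketed by preempt_disable()/preempt_enable(). The technique in isolation, with illustrative names standing in for the removed cpu_local_wrap_v():

	/* guarded_value(): evaluate an expression with preemption
	 * disabled and hand its value back out of the statement
	 * expression.  Hypothetical name, same shape as the removed
	 * macro.
	 */
	#define guarded_value(expr)		\
	({					\
		long res__;			\
		preempt_disable();		\
		res__ = (expr);			\
		preempt_enable();		\
		res__;				\
	})

	/* Usage sketch:
	 *	long n = guarded_value(local_read(&__get_cpu_var(v)));
	 */

Without the bracket, the operand address could be computed on one CPU and dereferenced on another; on architectures whose local_t operations are not cross-CPU atomic (x86 uses non-LOCK'd instructions, for instance) that can lose updates, which is what the "non atomic way" comment warns about.
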
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 361f4f16c30c..bdcdef02d147 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i, l) ((l)->a.counter+=(i))
#define __local_sub(i, l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 84b457a3c1bc..227753d288f6 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f19057..2e9972468a5d 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
- local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
- preempt_disable(); \
- (l); \
- preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ASM_X86_LOCAL_H */
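
The removed X86_64 comment ("this could be done better if we moved the per cpu data directly after GS") points at the addressing scheme the kernel later adopted for the this_cpu_*() operations: with per-cpu data reachable through the %gs segment base, locating this CPU's copy and updating it collapse into one instruction. A rough sketch of the idea, not the kernel's actual implementation (the real versions live in arch/x86/include/asm/percpu.h):

	/* Illustrative only: one gs-relative read-modify-write. */
	#define percpu_inc_sketch(var)			\
		asm("incq %%gs:%0" : "+m" (var))

Because address generation and the increment are a single instruction, an interrupt or a migration can only occur before or after it, never in between, so no preempt_disable()/preempt_enable() bracket is needed.
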
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fc218444e315..c8a5d68541d7 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -52,23 +52,4 @@ typedef struct
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ASM_GENERIC_LOCAL_H */
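
The double-underscore variants removed last are the non-atomic flavour: per the comment, they are only safe once preemption is already disabled and the counter is never touched from interrupt context, which lets an architecture use a plain unlocked read-modify-write. A usage-contract sketch, with "pkts" a hypothetical counter:

	static DEFINE_PER_CPU(local_t, pkts);	/* illustrative name */

	static void rx_one(void)
	{
		preempt_disable();	/* caller provides the exclusion */
		__cpu_local_inc(pkts);	/* plain, unlocked increment */
		preempt_enable();
	}

	/* If an interrupt handler could also touch pkts, the
	 * cpu_local_inc() form (atomic with respect to this CPU)
	 * would be required instead of the __ variant.
	 */
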