author    Mike Travis <travis@sgi.com>    2008-06-03 17:30:20 -0700
committer Ingo Molnar <mingo@elte.hu>     2008-06-10 12:05:35 +0200
commit    b3a0cb456d848e10b2f7b371ba05e44f1384520a (patch)
tree      184037cf9d1cb63f36fbd3c5a84e17c861322d15
parent    63cc8c75156462d4b42cbdd76c293b7eee7ddbfe (diff)
x86: extend percpu ops to 64 bit
* x86 percpu ops now will work on 64 bit too, so add the missing 8 byte
  cases.

* Add a few atomic ops that will be useful in the future:
	x86_xchg_percpu()
	x86_cmpxchg_percpu()

	x86_inc_percpu() - Increment by one can generate more efficient
	x86_dec_percpu()   instructions, and inc/dec will be supported by
			   cpu ops later.

* Use per_cpu_var() instead of per_cpu__##xxx.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
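A minimal usage sketch of the new ops (not part of the patch; the per-CPU
variables below are hypothetical and assume the usual DEFINE_PER_CPU()
declaration from <linux/percpu.h>):

	#include <linux/percpu.h>

	/* Hypothetical per-CPU variables, for illustration only. */
	DEFINE_PER_CPU(unsigned long, nr_events);
	DEFINE_PER_CPU(unsigned long, slot_owner);

	static void note_event(void)
	{
		/* single segment-relative inc, no lock prefix needed */
		x86_inc_percpu(nr_events);
	}

	static unsigned long try_claim_slot(unsigned long old, unsigned long new)
	{
		/* yields the previous value, as cmpxchg() does on ordinary memory */
		return x86_cmpxchg_percpu(slot_owner, old, new);
	}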
-rw-r--r--  include/asm-x86/percpu.h |  83
1 files changed, 78 insertions(+), 5 deletions(-)
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 736fc3bb8e1e..d85e5aaca7b1 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -108,6 +108,11 @@ do { \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
+ case 8: \
+ asm(op "q %1,"__percpu_seg"%0" \
+ : "+m" (var) \
+ : "ri" ((T__)val)); \
+ break; \
default: __bad_percpu_size(); \
} \
} while (0)
@@ -131,16 +136,84 @@ do { \
: "=r" (ret__) \
: "m" (var)); \
break; \
+ case 8: \
+ asm(op "q "__percpu_seg"%1,%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
default: __bad_percpu_size(); \
} \
ret__; \
})
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_addr_op(op, var) \
+({ \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_seg"%0" \
+ : : "m"(var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_seg"%0" \
+ : : "m"(var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_seg"%0" \
+ : : "m"(var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_seg"%0" \
+ : : "m"(var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+})
+
+#define percpu_cmpxchg_op(var, old, new) \
+({ \
+ typeof(var) prev; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm("cmpxchgb %b1, "__percpu_seg"%2" \
+ : "=a"(prev) \
+ : "q"(new), "m"(var), "0"(old) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm("cmpxchgw %w1, "__percpu_seg"%2" \
+ : "=a"(prev) \
+ : "r"(new), "m"(var), "0"(old) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm("cmpxchgl %k1, "__percpu_seg"%2" \
+ : "=a"(prev) \
+ : "r"(new), "m"(var), "0"(old) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm("cmpxchgq %1, "__percpu_seg"%2" \
+ : "=a"(prev) \
+ : "r"(new), "m"(var), "0"(old) \
+ : "memory"); \
+ break; \
+ default: \
+ __bad_percpu_size(); \
+ } \
+	prev;						\
+})
+
+#define x86_read_percpu(var) percpu_from_op("mov", per_cpu_var(var))
+#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu_var(var), val)
+#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu_var(var), val)
+#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu_var(var), val)
+#define x86_inc_percpu(var) percpu_addr_op("inc", per_cpu_var(var))
+#define x86_dec_percpu(var) percpu_addr_op("dec", per_cpu_var(var))
+#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu_var(var), val)
+#define x86_xchg_percpu(var, val) percpu_to_op("xchg", per_cpu_var(var), val)
+#define x86_cmpxchg_percpu(var, old, new) \
+ percpu_cmpxchg_op(per_cpu_var(var), old, new)
+
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */
#endif /* _ASM_X86_PERCPU_H_ */
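For reference, a sketch of the per_cpu_var() indirection the ops now go
through (assumed from the include/asm-generic/percpu.h of this period; not
part of this patch):

	/* per_cpu_var() hides the per_cpu__ name mangling, so the ops above
	 * no longer paste per_cpu__##var by hand. */
	#define per_cpu_var(var)	per_cpu__##var

	/* e.g. x86_read_percpu(nr_events) - with the hypothetical variable
	 * from the sketch above - expands to
	 *	percpu_from_op("mov", per_cpu__nr_events)
	 * i.e. a single segment-relative mov of the current CPU's copy. */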