author     Peter Zijlstra <peterz@infradead.org>      2018-10-11 12:38:27 +0200
committer  Ben Hutchings <ben@decadent.org.uk>        2018-12-16 22:09:41 +0000
commit     4f78f1df7fc63b55a6e9fe9184f2d156aeac32c9 (patch)
tree       a6cc22e7347fc91d61eb821b726f0724ea7f8c85 /arch/x86/include
parent     b427832009b97a3ee412ef643a80b15372a7754d (diff)
x86/percpu: Fix this_cpu_read()
commit b59167ac7bafd804c91e49ad53c6d33a7394d4c8 upstream.

Eric reported that a sequence count loop using this_cpu_read() got
optimized out. This is wrong; this_cpu_read() must imply READ_ONCE()
because the interface is IRQ-safe, therefore an interrupt can have
changed the per-cpu value.

Fixes: 7c3576d261ce ("[PATCH] i386: Convert PDA into the percpu section")
Reported-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Cc: hpa@zytor.com
Cc: eric.dumazet@gmail.com
Cc: bp@alien8.de
Link: https://lkml.kernel.org/r/20181011104019.748208519@infradead.org
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
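For reference, a minimal userspace sketch of the failure mode (not part of
this patch; 'flag', read_flag_plain(), read_flag_volatile() and
wait_for_flag() are made-up names, and it assumes x86 with GCC-style
inline asm):

/*
 * Hypothetical illustration, NOT kernel code: 'flag' stands in for a
 * per-CPU variable that an interrupt handler may change behind the
 * compiler's back.
 */
static unsigned int flag;

static unsigned int read_flag_plain(void)
{
	unsigned int ret;

	/*
	 * Non-volatile asm: GCC may treat this as a pure computation of
	 * its inputs and hoist it out of a loop.
	 */
	asm("movl %1, %0" : "=r" (ret) : "m" (flag));
	return ret;
}

static unsigned int read_flag_volatile(void)
{
	unsigned int ret;

	/*
	 * asm volatile: the load is re-issued every time control reaches
	 * it, which is what this_cpu_read() relies on.
	 */
	asm volatile("movl %1, %0" : "=r" (ret) : "m" (flag));
	return ret;
}

void wait_for_flag(void)
{
	while (read_flag_plain() == 0)		/* may spin forever on a stale value */
		;
	while (read_flag_volatile() == 0)	/* always re-reads 'flag' */
		;
}

Because nothing inside the first loop clobbers memory, the compiler is
free to hoist the non-volatile asm out and spin on a stale value, which
is essentially the optimized-out sequence count loop reported above; the
volatile qualifier added below forces the per-cpu load to be re-issued on
every iteration.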
Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/percpu.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 851bcdc5db04..83ad4424d711 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -185,22 +185,22 @@ do { \
 	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
+		asm volatile(op "b "__percpu_arg(1)",%0"\
 		    : "=q" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
+		asm volatile(op "w "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
+		asm volatile(op "l "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
+		asm volatile(op "q "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\