author     Ingo Molnar <mingo@kernel.org>    2020-10-09 08:35:01 +0200
committer  Ingo Molnar <mingo@kernel.org>    2020-10-09 08:56:02 +0200
commit     d6c4c11348816fb4d16e33bf47d559d7aa59350a (patch)
tree       14d6c6a8d7b88b4f06647717320562b6e525668a
parent     e705d397965811ac528d7213b42d74ffe43caf38 (diff)
parent     cd290ec24633f51029dab0d25505fae7da0e1eda (diff)
Merge branch 'kcsan' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/core
Pull KCSAN updates for v5.10 from Paul E. McKenney:

 - Improve kernel messages.

 - Be more permissive with bitops races under KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.

 - Optimize debugfs stat counters.

 - Introduce the instrument_*read_write() annotations, to provide a finer
   description of certain ops - using KCSAN's compound instrumentation.
   Use them for atomic RMW and bitops, where appropriate.
   Doing this might find new races.
   (Depends on the compiler having tsan-compound-read-before-write=1 support.)

 - Support atomic built-ins, which will help certain architectures, such as s390.

 - Misc enhancements and smaller fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
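To make the "compound instrumentation" point above concrete, here is a minimal user-space sketch of the access pattern the new annotations describe. It is illustration only, not kernel code and not part of this merge, and the thread and variable names are made up for the example: one thread performs an atomic read-modify-write on a shared word while another reads it with a plain, unmarked load. With compound instrumentation, KCSAN can attribute both the read and the write half of the RMW when reporting such a race.

#include <pthread.h>
#include <stdio.h>

static int shared;	/* the shared location under test */

static void *rmw_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		__atomic_fetch_add(&shared, 1, __ATOMIC_RELAXED);	/* atomic RMW: reads and writes 'shared' */
	return NULL;
}

static void *plain_read_thread(void *arg)
{
	long last = 0;

	(void)arg;
	for (int i = 0; i < 100000; i++)
		last = shared;	/* plain, unmarked read of 'shared' */
	return (void *)last;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, rmw_thread, NULL);
	pthread_create(&b, NULL, plain_read_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("final value: %d\n", shared);
	return 0;
}

Build with something like "gcc -O2 -pthread race.c"; in kernel terms the plain read would need READ_ONCE() or an equivalent marked accessor to stop being a data race.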
-rw-r--r--  include/asm-generic/atomic-instrumented.h              330
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h         6
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h           2
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h    30
-rw-r--r--  include/linux/instrumented.h                             30
-rw-r--r--  include/linux/kcsan-checks.h                             45
-rw-r--r--  kernel/kcsan/core.c                                     210
-rw-r--r--  kernel/kcsan/debugfs.c                                  130
-rw-r--r--  kernel/kcsan/kcsan-test.c                               128
-rw-r--r--  kernel/kcsan/kcsan.h                                     12
-rw-r--r--  kernel/kcsan/report.c                                    10
-rw-r--r--  kernel/kcsan/selftest.c                                   8
-rw-r--r--  lib/Kconfig.kcsan                                         5
-rw-r--r--  scripts/Makefile.kcsan                                    2
-rwxr-xr-x  scripts/atomic/gen-atomic-instrumented.sh                21
-rw-r--r--  tools/objtool/check.c                                    55

16 files changed, 677 insertions, 347 deletions
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 379986e40159..cd223b68b69d 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -60,7 +60,7 @@ atomic_set_release(atomic_t *v, int i)
static __always_inline void
atomic_add(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
@@ -69,7 +69,7 @@ atomic_add(int i, atomic_t *v)
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
@@ -79,7 +79,7 @@ atomic_add_return(int i, atomic_t *v)
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
@@ -89,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
@@ -99,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v)
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
@@ -109,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
@@ -119,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
@@ -129,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
@@ -139,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
@@ -148,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
@@ -157,7 +157,7 @@ atomic_sub(int i, atomic_t *v)
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
@@ -167,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
@@ -177,7 +177,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
@@ -187,7 +187,7 @@ atomic_sub_return_release(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
@@ -197,7 +197,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
@@ -207,7 +207,7 @@ atomic_fetch_sub(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
@@ -217,7 +217,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
@@ -227,7 +227,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
@@ -237,7 +237,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_inc(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
@@ -247,7 +247,7 @@ atomic_inc(atomic_t *v)
static __always_inline int
atomic_inc_return(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
@@ -257,7 +257,7 @@ atomic_inc_return(atomic_t *v)
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
@@ -267,7 +267,7 @@ atomic_inc_return_acquire(atomic_t *v)
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
@@ -277,7 +277,7 @@ atomic_inc_return_release(atomic_t *v)
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
@@ -287,7 +287,7 @@ atomic_inc_return_relaxed(atomic_t *v)
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
@@ -297,7 +297,7 @@ atomic_fetch_inc(atomic_t *v)
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
@@ -307,7 +307,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
@@ -317,7 +317,7 @@ atomic_fetch_inc_release(atomic_t *v)
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
@@ -327,7 +327,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
static __always_inline void
atomic_dec(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
@@ -337,7 +337,7 @@ atomic_dec(atomic_t *v)
static __always_inline int
atomic_dec_return(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
@@ -347,7 +347,7 @@ atomic_dec_return(atomic_t *v)
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
@@ -357,7 +357,7 @@ atomic_dec_return_acquire(atomic_t *v)
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
@@ -367,7 +367,7 @@ atomic_dec_return_release(atomic_t *v)
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
@@ -377,7 +377,7 @@ atomic_dec_return_relaxed(atomic_t *v)
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
@@ -387,7 +387,7 @@ atomic_fetch_dec(atomic_t *v)
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
@@ -397,7 +397,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
@@ -407,7 +407,7 @@ atomic_fetch_dec_release(atomic_t *v)
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
@@ -416,7 +416,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
static __always_inline void
atomic_and(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
@@ -425,7 +425,7 @@ atomic_and(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
@@ -435,7 +435,7 @@ atomic_fetch_and(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
@@ -445,7 +445,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
@@ -455,7 +455,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
@@ -465,7 +465,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
@@ -475,7 +475,7 @@ atomic_andnot(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
@@ -485,7 +485,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
@@ -495,7 +495,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
@@ -505,7 +505,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
@@ -514,7 +514,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_or(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
@@ -523,7 +523,7 @@ atomic_or(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
@@ -533,7 +533,7 @@ atomic_fetch_or(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
@@ -543,7 +543,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
@@ -553,7 +553,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
@@ -562,7 +562,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
@@ -571,7 +571,7 @@ atomic_xor(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
@@ -581,7 +581,7 @@ atomic_fetch_xor(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
@@ -591,7 +591,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
@@ -601,7 +601,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
@@ -611,7 +611,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
@@ -621,7 +621,7 @@ atomic_xchg(atomic_t *v, int i)
static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
@@ -631,7 +631,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
@@ -641,7 +641,7 @@ atomic_xchg_release(atomic_t *v, int i)
static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
@@ -651,7 +651,7 @@ atomic_xchg_relaxed(atomic_t *v, int i)
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
@@ -661,7 +661,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
@@ -671,7 +671,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
@@ -681,7 +681,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
@@ -691,8 +691,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
@@ -702,8 +702,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
@@ -713,8 +713,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
@@ -724,8 +724,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
@@ -735,7 +735,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
@@ -745,7 +745,7 @@ atomic_sub_and_test(int i, atomic_t *v)
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
@@ -755,7 +755,7 @@ atomic_dec_and_test(atomic_t *v)
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
@@ -765,7 +765,7 @@ atomic_inc_and_test(atomic_t *v)
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
@@ -775,7 +775,7 @@ atomic_add_negative(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
@@ -785,7 +785,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
@@ -795,7 +795,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
@@ -805,7 +805,7 @@ atomic_inc_not_zero(atomic_t *v)
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
@@ -815,7 +815,7 @@ atomic_inc_unless_negative(atomic_t *v)
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
@@ -825,7 +825,7 @@ atomic_dec_unless_positive(atomic_t *v)
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
@@ -870,7 +870,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
@@ -879,7 +879,7 @@ atomic64_add(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
@@ -889,7 +889,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
@@ -899,7 +899,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
@@ -909,7 +909,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
@@ -919,7 +919,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
@@ -929,7 +929,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
@@ -939,7 +939,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -949,7 +949,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
@@ -958,7 +958,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
@@ -967,7 +967,7 @@ atomic64_sub(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
@@ -977,7 +977,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
@@ -987,7 +987,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
@@ -997,7 +997,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
@@ -1007,7 +1007,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
@@ -1017,7 +1017,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
@@ -1027,7 +1027,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
@@ -1037,7 +1037,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
@@ -1047,7 +1047,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_inc(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
@@ -1057,7 +1057,7 @@ atomic64_inc(atomic64_t *v)
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
@@ -1067,7 +1067,7 @@ atomic64_inc_return(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
@@ -1077,7 +1077,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
@@ -1087,7 +1087,7 @@ atomic64_inc_return_release(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
@@ -1097,7 +1097,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
@@ -1107,7 +1107,7 @@ atomic64_fetch_inc(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
@@ -1117,7 +1117,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
@@ -1127,7 +1127,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
@@ -1137,7 +1137,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
static __always_inline void
atomic64_dec(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
@@ -1147,7 +1147,7 @@ atomic64_dec(atomic64_t *v)
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
@@ -1157,7 +1157,7 @@ atomic64_dec_return(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
@@ -1167,7 +1167,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
@@ -1177,7 +1177,7 @@ atomic64_dec_return_release(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
@@ -1187,7 +1187,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
@@ -1197,7 +1197,7 @@ atomic64_fetch_dec(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
@@ -1207,7 +1207,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
@@ -1217,7 +1217,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
@@ -1226,7 +1226,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
@@ -1235,7 +1235,7 @@ atomic64_and(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
@@ -1245,7 +1245,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
@@ -1255,7 +1255,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
@@ -1265,7 +1265,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
@@ -1275,7 +1275,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
@@ -1285,7 +1285,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
@@ -1295,7 +1295,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
@@ -1305,7 +1305,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
@@ -1315,7 +1315,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
@@ -1324,7 +1324,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
@@ -1333,7 +1333,7 @@ atomic64_or(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
@@ -1343,7 +1343,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
@@ -1353,7 +1353,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
@@ -1363,7 +1363,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
@@ -1372,7 +1372,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
@@ -1381,7 +1381,7 @@ atomic64_xor(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
@@ -1391,7 +1391,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
@@ -1401,7 +1401,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
@@ -1411,7 +1411,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
@@ -1421,7 +1421,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
@@ -1431,7 +1431,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
@@ -1441,7 +1441,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
@@ -1451,7 +1451,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
@@ -1461,7 +1461,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
@@ -1471,7 +1471,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
@@ -1481,7 +1481,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
@@ -1491,7 +1491,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
@@ -1501,8 +1501,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
@@ -1512,8 +1512,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
@@ -1523,8 +1523,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
@@ -1534,8 +1534,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
@@ -1545,7 +1545,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
@@ -1555,7 +1555,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
@@ -1565,7 +1565,7 @@ atomic64_dec_and_test(atomic64_t *v)
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
@@ -1575,7 +1575,7 @@ atomic64_inc_and_test(atomic64_t *v)
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
@@ -1585,7 +1585,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
@@ -1595,7 +1595,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
@@ -1605,7 +1605,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
@@ -1615,7 +1615,7 @@ atomic64_inc_not_zero(atomic64_t *v)
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
@@ -1625,7 +1625,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
@@ -1635,7 +1635,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1786,4 +1786,4 @@ atomic64_dec_if_positive(atomic64_t *v)
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 89bf97f3a7509b740845e51ddf31055b48a81f40
+// 9d5e6a315fb1335d02f0ccd3655a91c3dafcc63e
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index fb2cb33a4013..81915dcd4b4e 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index b9bec468ae03..75ef606f7145 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 20f788a25ef9..37363d570b9b 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -58,6 +58,30 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
arch___change_bit(nr, addr);
}
+static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little more special.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
@@ -68,7 +92,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +106,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +120,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
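
To illustrate what the compound bitop instrumentation above buys, here is a minimal sketch of a racing pair in the style of kernel/kcsan/kcsan-test.c (the helper names and surrounding harness are hypothetical, not part of this series):

#include <linux/bitops.h>

static unsigned long test_flags;

/*
 * Non-atomic RMW: with __instrument_read_write_bitop() above, the read half
 * is now checked, so the race with the plain writer below can be reported
 * even under CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
 */
static void thread_a(void)
{
	__test_and_set_bit(0, &test_flags);
}

/* Unmarked plain write to the same word, racing with thread_a(). */
static void thread_b(void)
{
	test_flags = 0;
}

Before this change, only the write side of __test_and_set_bit() was instrumented, so with assumed-atomic plain writes this pair would have gone unreported.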
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 43e6ea591975..42faebbaa202 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -43,6 +43,21 @@ static __always_inline void instrument_write(const volatile void *v, size_t size
}
/**
+ * instrument_read_write - instrument regular read-write access
+ *
+ * Instrument a regular read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_read_write(v, size);
+}
+
+/**
* instrument_atomic_read - instrument atomic read access
*
* Instrument an atomic read access. The instrumentation should be inserted
@@ -73,6 +88,21 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
}
/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_read_write(v, size);
+}
+
+/**
* instrument_copy_to_user - instrument reads of copy_to_user
*
* Instrument reads from kernel memory, that are due to copy_to_user (and
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index c5f6c1dcf7e3..cf14840609ce 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -7,19 +7,13 @@
#include <linux/compiler_attributes.h>
#include <linux/types.h>
-/*
- * ACCESS TYPE MODIFIERS
- *
- * <none>: normal read access;
- * WRITE : write access;
- * ATOMIC: access is atomic;
- * ASSERT: access is not a regular access, but an assertion;
- * SCOPED: access is a scoped access;
- */
-#define KCSAN_ACCESS_WRITE 0x1
-#define KCSAN_ACCESS_ATOMIC 0x2
-#define KCSAN_ACCESS_ASSERT 0x4
-#define KCSAN_ACCESS_SCOPED 0x8
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
/*
* __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
@@ -205,6 +199,15 @@ static inline void __kcsan_disable_current(void) { }
__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/**
* kcsan_check_read - check regular read access for races
*
* @ptr: address of access
@@ -221,18 +224,30 @@ static inline void __kcsan_disable_current(void) { }
#define kcsan_check_write(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
/*
* Check for atomic accesses: if atomic accesses are not ignored, this simply
* aliases to kcsan_check_access(), otherwise becomes a no-op.
*/
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
-#define kcsan_check_atomic_read(...) do { } while (0)
-#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
/**
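
As a quick worked example, the access-type values that the new compound checkers pass follow directly from the bit definitions above:

#include <linux/build_bug.h>
#include <linux/kcsan-checks.h>

/* kcsan_check_read_write():        COMPOUND | WRITE          = 0x2 | 0x1 = 0x3 */
static_assert((KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE) == 0x3);
/* kcsan_check_atomic_read_write(): COMPOUND | WRITE | ATOMIC = 0x3 | 0x4 = 0x7 */
static_assert((KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC) == 0x7);

These are exactly the flag combinations that get_access_type() in kernel/kcsan/report.c (further down in this series) maps to "read-write" and "read-write (marked)".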
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 9147ff6a12e5..3994a217bde7 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "kcsan: " fmt
+
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
@@ -98,6 +100,9 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
*/
static DEFINE_PER_CPU(long, kcsan_skip);
+/* For kcsan_prandom_u32_max(). */
+static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
size_t size,
bool expect_write,
@@ -223,7 +228,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
(type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
- IS_ALIGNED((unsigned long)ptr, size))
+ !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
return true; /* Assume aligned writes up to word size are atomic. */
if (ctx->atomic_next > 0) {
@@ -269,11 +274,28 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
return true;
}
+/*
+ * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
+ * for more details.
+ *
+ * The open-coded version here is using only safe primitives for all contexts
+ * where we can have KCSAN instrumentation. In particular, we cannot use
+ * prandom_u32() directly, as its tracepoint could cause recursion.
+ */
+static u32 kcsan_prandom_u32_max(u32 ep_ro)
+{
+ struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
+ const u32 res = prandom_u32_state(state);
+
+ put_cpu_var(kcsan_rand_state);
+ return (u32)(((u64) res * ep_ro) >> 32);
+}
+
static inline void reset_kcsan_skip(void)
{
long skip_count = kcsan_skip_watch -
(IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
- prandom_u32_max(kcsan_skip_watch) :
+ kcsan_prandom_u32_max(kcsan_skip_watch) :
0);
this_cpu_write(kcsan_skip, skip_count);
}
@@ -283,12 +305,18 @@ static __always_inline bool kcsan_is_enabled(void)
return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
}
-static inline unsigned int get_delay(void)
+/* Introduce delay depending on context and configuration. */
+static void delay_access(int type)
{
unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
- return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
- prandom_u32_max(delay) :
- 0);
+ /* For certain access types, skew the random delay to be longer. */
+ unsigned int skew_delay_order =
+ (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
+
+ delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
+ kcsan_prandom_u32_max(delay >> skew_delay_order) :
+ 0;
+ udelay(delay);
}
void kcsan_save_irqtrace(struct task_struct *task)
@@ -361,13 +389,13 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
* already removed the watchpoint, or another thread consumed
* the watchpoint before this thread.
*/
- kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
}
if ((type & KCSAN_ACCESS_ASSERT) != 0)
- kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
else
- kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
user_access_restore(flags);
}
@@ -408,7 +436,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
goto out;
if (!check_encodable((unsigned long)ptr, size)) {
- kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
goto out;
}
@@ -428,12 +456,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* with which should_watch() returns true should be tweaked so
* that this case happens very rarely.
*/
- kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
goto out_unlock;
}
- kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
- kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
/*
* Read the current value, to later check and infer a race if the data
@@ -459,7 +487,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
kcsan_disable_current();
- pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
+ pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
is_write ? "write" : "read", size, ptr,
watchpoint_slot((unsigned long)ptr),
encode_watchpoint((unsigned long)ptr, size, is_write));
@@ -470,7 +498,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* Delay this thread, to increase probability of observing a racy
* conflicting access.
*/
- udelay(get_delay());
+ delay_access(type);
/*
* Re-read value, and check if it is as expected; if not, we infer a
@@ -535,16 +563,16 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* increment this counter.
*/
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
- kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
watchpoint - watchpoints);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
- kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
if (is_assert)
- kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
@@ -557,7 +585,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* reused after this point.
*/
remove_watchpoint(watchpoint);
- kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
+ atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
out_unlock:
if (!kcsan_interrupt_watcher)
local_irq_restore(irq_flags);
@@ -614,13 +642,16 @@ void __init kcsan_init(void)
BUG_ON(!in_task());
kcsan_debugfs_init();
+ prandom_seed_full_state(&kcsan_rand_state);
/*
* We are in the init task, and no other tasks should be running;
* WRITE_ONCE without memory barrier is sufficient.
*/
- if (kcsan_early_enable)
+ if (kcsan_early_enable) {
+ pr_info("enabled early\n");
WRITE_ONCE(kcsan_enabled, true);
+ }
}
/* === Exported interface =================================================== */
@@ -793,7 +824,17 @@ EXPORT_SYMBOL(__kcsan_check_access);
EXPORT_SYMBOL(__tsan_write##size); \
void __tsan_unaligned_write##size(void *ptr) \
__alias(__tsan_write##size); \
- EXPORT_SYMBOL(__tsan_unaligned_write##size)
+ EXPORT_SYMBOL(__tsan_unaligned_write##size); \
+ void __tsan_read_write##size(void *ptr); \
+ void __tsan_read_write##size(void *ptr) \
+ { \
+ check_access(ptr, size, \
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+ } \
+ EXPORT_SYMBOL(__tsan_read_write##size); \
+ void __tsan_unaligned_read_write##size(void *ptr) \
+ __alias(__tsan_read_write##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_read_write##size)
DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
@@ -879,3 +920,130 @@ void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);
+
+/*
+ * Instrumentation for atomic builtins (__atomic_*, __sync_*).
+ *
+ * Normal kernel code _should not_ be using them directly, but some
+ * architectures may implement some or all atomics using the compilers'
+ * builtins.
+ *
+ * Note: If an architecture decides to fully implement atomics using these
+ * builtins, the builtins are implicitly instrumented by KCSAN (and KASAN,
+ * etc.), so implementing the ARCH_ATOMIC interface (to get instrumentation
+ * via atomic-instrumented) is no longer necessary.
+ *
+ * TSAN instrumentation replaces atomic accesses with calls to any of the below
+ * functions, whose job is to also execute the operation itself.
+ */
+
+#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
+ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
+ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
+ { \
+ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+ check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+ } \
+ return __atomic_load_n(ptr, memorder); \
+ } \
+ EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
+ void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
+ void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
+ { \
+ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+ check_access(ptr, bits / BITS_PER_BYTE, \
+ KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+ } \
+ __atomic_store_n(ptr, v, memorder); \
+ } \
+ EXPORT_SYMBOL(__tsan_atomic##bits##_store)
+
+#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
+ u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
+ u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
+ { \
+ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+ check_access(ptr, bits / BITS_PER_BYTE, \
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+ KCSAN_ACCESS_ATOMIC); \
+ } \
+ return __atomic_##op##suffix(ptr, v, memorder); \
+ } \
+ EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
+
+/*
+ * Note: CAS operations are always classified as write, even in case they
+ * fail. We cannot perform check_access() after a write, as it might lead to
+ * false positives, in cases such as:
+ *
+ * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
+ *
+ * T1: if (__atomic_load_n(&p->flag, ...)) {
+ * modify *p;
+ * p->flag = 0;
+ * }
+ *
+ * The only downside is that, if there are 3 threads, with one CAS that
+ * succeeds, another CAS that fails, and an unmarked racing operation, we may
+ * point at the wrong CAS as the source of the race. However, if we assume that
+ * all CAS can succeed in some other execution, the data race is still valid.
+ */
+#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
+ int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
+ u##bits val, int mo, int fail_mo); \
+ int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
+ u##bits val, int mo, int fail_mo) \
+ { \
+ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+ check_access(ptr, bits / BITS_PER_BYTE, \
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+ KCSAN_ACCESS_ATOMIC); \
+ } \
+ return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
+ } \
+ EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
+
+#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
+ u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
+ int mo, int fail_mo); \
+ u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
+ int mo, int fail_mo) \
+ { \
+ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+ check_access(ptr, bits / BITS_PER_BYTE, \
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+ KCSAN_ACCESS_ATOMIC); \
+ } \
+ __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
+ return exp; \
+ } \
+ EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
+
+#define DEFINE_TSAN_ATOMIC_OPS(bits) \
+ DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
+ DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
+ DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
+ DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
+ DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
+ DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
+
+DEFINE_TSAN_ATOMIC_OPS(8);
+DEFINE_TSAN_ATOMIC_OPS(16);
+DEFINE_TSAN_ATOMIC_OPS(32);
+DEFINE_TSAN_ATOMIC_OPS(64);
+
+void __tsan_atomic_thread_fence(int memorder);
+void __tsan_atomic_thread_fence(int memorder)
+{
+ __atomic_thread_fence(memorder);
+}
+EXPORT_SYMBOL(__tsan_atomic_thread_fence);
+
+void __tsan_atomic_signal_fence(int memorder);
+void __tsan_atomic_signal_fence(int memorder) { }
+EXPORT_SYMBOL(__tsan_atomic_signal_fence);
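
For reference, hand-expanding DEFINE_TSAN_ATOMIC_RMW(fetch_add, 32, ) from the macro above gives roughly the following function inside kernel/kcsan/core.c (declaration and EXPORT_SYMBOL omitted). Per the comment above, this is the call the compiler substitutes for __atomic_fetch_add() when instrumenting with -fsanitize=thread:

u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder)
{
	/* Mark the access as a compound atomic read-write for KCSAN. */
	if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		check_access(ptr, 32 / BITS_PER_BYTE,
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |
			     KCSAN_ACCESS_ATOMIC);
	}
	/* Then perform the operation itself via the builtin. */
	return __atomic_fetch_add(ptr, v, memorder);
}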
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 023e49c58d55..3c8093a371b1 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "kcsan: " fmt
+
#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
@@ -15,10 +17,19 @@
#include "kcsan.h"
-/*
- * Statistics counters.
- */
-static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
+static const char *const counter_names[] = {
+ [KCSAN_COUNTER_USED_WATCHPOINTS] = "used_watchpoints",
+ [KCSAN_COUNTER_SETUP_WATCHPOINTS] = "setup_watchpoints",
+ [KCSAN_COUNTER_DATA_RACES] = "data_races",
+ [KCSAN_COUNTER_ASSERT_FAILURES] = "assert_failures",
+ [KCSAN_COUNTER_NO_CAPACITY] = "no_capacity",
+ [KCSAN_COUNTER_REPORT_RACES] = "report_races",
+ [KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN] = "races_unknown_origin",
+ [KCSAN_COUNTER_UNENCODABLE_ACCESSES] = "unencodable_accesses",
+ [KCSAN_COUNTER_ENCODING_FALSE_POSITIVES] = "encoding_false_positives",
+};
+static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
/*
* Addresses for filtering functions from reporting. This list can be used as a
@@ -39,34 +50,6 @@ static struct {
};
static DEFINE_SPINLOCK(report_filterlist_lock);
-static const char *counter_to_name(enum kcsan_counter_id id)
-{
- switch (id) {
- case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
- case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
- case KCSAN_COUNTER_DATA_RACES: return "data_races";
- case KCSAN_COUNTER_ASSERT_FAILURES: return "assert_failures";
- case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
- case KCSAN_COUNTER_REPORT_RACES: return "report_races";
- case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
- case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
- case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
- case KCSAN_COUNTER_COUNT:
- BUG();
- }
- return NULL;
-}
-
-void kcsan_counter_inc(enum kcsan_counter_id id)
-{
- atomic_long_inc(&counters[id]);
-}
-
-void kcsan_counter_dec(enum kcsan_counter_id id)
-{
- atomic_long_dec(&counters[id]);
-}
-
/*
* The microbenchmark allows benchmarking KCSAN core runtime only. To run
* multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
@@ -86,7 +69,7 @@ static noinline void microbenchmark(unsigned long iters)
*/
WRITE_ONCE(kcsan_enabled, false);
- pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+ pr_info("%s begin | iters: %lu\n", __func__, iters);
cycles = get_cycles();
while (iters--) {
@@ -97,73 +80,13 @@ static noinline void microbenchmark(unsigned long iters)
}
cycles = get_cycles() - cycles;
- pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+ pr_info("%s end | cycles: %llu\n", __func__, cycles);
WRITE_ONCE(kcsan_enabled, was_enabled);
/* restore context */
current->kcsan_ctx = ctx_save;
}
-/*
- * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
- * debugfs file from multiple tasks to generate real conflicts and show reports.
- */
-static long test_dummy;
-static long test_flags;
-static long test_scoped;
-static noinline void test_thread(unsigned long iters)
-{
- const long CHANGE_BITS = 0xff00ff00ff00ff00L;
- const struct kcsan_ctx ctx_save = current->kcsan_ctx;
- cycles_t cycles;
-
- /* We may have been called from an atomic region; reset context. */
- memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
-
- pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
- pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
- &test_dummy, &test_flags, &test_scoped);
-
- cycles = get_cycles();
- while (iters--) {
- /* These all should generate reports. */
- __kcsan_check_read(&test_dummy, sizeof(test_dummy));
- ASSERT_EXCLUSIVE_WRITER(test_dummy);
- ASSERT_EXCLUSIVE_ACCESS(test_dummy);
-
- ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
- __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
- ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
- __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
- /* not actually instrumented */
- WRITE_ONCE(test_dummy, iters); /* to observe value-change */
- __kcsan_check_write(&test_dummy, sizeof(test_dummy));
-
- test_flags ^= CHANGE_BITS; /* generate value-change */
- __kcsan_check_write(&test_flags, sizeof(test_flags));
-
- BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
- {
- /* Should generate reports anywhere in this block. */
- ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
- ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
- BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
- /* Unrelated accesses. */
- __kcsan_check_access(&cycles, sizeof(cycles), 0);
- __kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
- }
- BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
- }
- cycles = get_cycles() - cycles;
-
- pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
-
- /* restore context */
- current->kcsan_ctx = ctx_save;
-}
-
static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
const unsigned long a = *(const unsigned long *)rhs;
@@ -220,7 +143,7 @@ static ssize_t insert_report_filterlist(const char *func)
ssize_t ret = 0;
if (!addr) {
- pr_err("KCSAN: could not find function: '%s'\n", func);
+ pr_err("could not find function: '%s'\n", func);
return -ENOENT;
}
@@ -270,9 +193,10 @@ static int show_info(struct seq_file *file, void *v)
/* show stats */
seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
- for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
- seq_printf(file, "%s: %ld\n", counter_to_name(i),
- atomic_long_read(&counters[i]));
+ for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
+ seq_printf(file, "%s: %ld\n", counter_names[i],
+ atomic_long_read(&kcsan_counters[i]));
+ }
/* show filter functions, and filter type */
spin_lock_irqsave(&report_filterlist_lock, flags);
@@ -307,18 +231,12 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o
WRITE_ONCE(kcsan_enabled, true);
} else if (!strcmp(arg, "off")) {
WRITE_ONCE(kcsan_enabled, false);
- } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
+ } else if (str_has_prefix(arg, "microbench=")) {
unsigned long iters;
- if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
+ if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
return -EINVAL;
microbenchmark(iters);
- } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
- unsigned long iters;
-
- if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
- return -EINVAL;
- test_thread(iters);
} else if (!strcmp(arg, "whitelist")) {
set_report_filterlist_whitelist(true);
} else if (!strcmp(arg, "blacklist")) {
diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
index fed6fcb5768c..ebe7fd245104 100644
--- a/kernel/kcsan/kcsan-test.c
+++ b/kernel/kcsan/kcsan-test.c
@@ -27,6 +27,12 @@
#include <linux/types.h>
#include <trace/events/printk.h>
+#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+#else
+#define __KCSAN_ACCESS_RW(alt) (alt)
+#endif
+
/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);
@@ -186,20 +192,21 @@ static bool report_matches(const struct expect_report *r)
/* Access 1 & 2 */
for (i = 0; i < 2; ++i) {
+ const int ty = r->access[i].type;
const char *const access_type =
- (r->access[i].type & KCSAN_ACCESS_ASSERT) ?
- ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
- "assert no accesses" :
- "assert no writes") :
- ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
- "write" :
- "read");
+ (ty & KCSAN_ACCESS_ASSERT) ?
+ ((ty & KCSAN_ACCESS_WRITE) ?
+ "assert no accesses" :
+ "assert no writes") :
+ ((ty & KCSAN_ACCESS_WRITE) ?
+ ((ty & KCSAN_ACCESS_COMPOUND) ?
+ "read-write" :
+ "write") :
+ "read");
const char *const access_type_aux =
- (r->access[i].type & KCSAN_ACCESS_ATOMIC) ?
- " (marked)" :
- ((r->access[i].type & KCSAN_ACCESS_SCOPED) ?
- " (scoped)" :
- "");
+ (ty & KCSAN_ACCESS_ATOMIC) ?
+ " (marked)" :
+ ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");
if (i == 1) {
/* Access 2 */
@@ -277,6 +284,12 @@ static noinline void test_kernel_write_atomic(void)
WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}
+static noinline void test_kernel_atomic_rmw(void)
+{
+ /* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
+ __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
+}
+
__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }
@@ -390,6 +403,15 @@ static noinline void test_kernel_seqlock_writer(void)
write_sequnlock_irqrestore(&test_seqlock, flags);
}
+static noinline void test_kernel_atomic_builtins(void)
+{
+ /*
+ * Generate concurrent accesses, expecting no reports, ensuring KCSAN
+ * treats builtin atomics as actually atomic.
+ */
+ __atomic_load_n(&test_var, __ATOMIC_RELAXED);
+}
+
/* ===== Test cases ===== */
/* Simple test with normal data race. */
@@ -430,8 +452,8 @@ static void test_concurrent_races(struct kunit *test)
const struct expect_report expect = {
.access = {
/* NULL will match any address. */
- { test_kernel_rmw_array, NULL, 0, KCSAN_ACCESS_WRITE },
- { test_kernel_rmw_array, NULL, 0, 0 },
+ { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
+ { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
},
};
static const struct expect_report never = {
@@ -620,6 +642,29 @@ static void test_read_plain_atomic_write(struct kunit *test)
KUNIT_EXPECT_TRUE(test, match_expect);
}
+/* Test that atomic RMWs generate the correct report. */
+__no_kcsan
+static void test_read_plain_atomic_rmw(struct kunit *test)
+{
+ const struct expect_report expect = {
+ .access = {
+ { test_kernel_read, &test_var, sizeof(test_var), 0 },
+ { test_kernel_atomic_rmw, &test_var, sizeof(test_var),
+ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
+ },
+ };
+ bool match_expect = false;
+
+ if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
+ return;
+
+ begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
+ do {
+ match_expect = report_matches(&expect);
+ } while (!end_test_checks(match_expect));
+ KUNIT_EXPECT_TRUE(test, match_expect);
+}
+
/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
@@ -853,6 +898,59 @@ static void test_seqlock_noreport(struct kunit *test)
}
/*
+ * Test that the atomic builtins work and that the required instrumentation
+ * functions exist. We also test that KCSAN understands they're atomic by
+ * racing with them via test_kernel_atomic_builtins(), and expect no reports.
+ *
+ * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
+ */
+static void test_atomic_builtins(struct kunit *test)
+{
+ bool match_never = false;
+
+ begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
+ do {
+ long tmp;
+
+ kcsan_enable_current();
+
+ __atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
+ KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));
+
+ KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 20L, test_var);
+
+ tmp = 20L;
+ KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
+ 0, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, tmp, 20L);
+ KUNIT_EXPECT_EQ(test, test_var, 30L);
+ KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
+ 1, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, tmp, 30L);
+ KUNIT_EXPECT_EQ(test, test_var, 30L);
+
+ KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
+ KUNIT_EXPECT_EQ(test, -2L, test_var);
+
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+
+ kcsan_disable_current();
+
+ match_never = report_available();
+ } while (!end_test_checks(match_never));
+ KUNIT_EXPECT_FALSE(test, match_never);
+}
+
+/*
* Each test case is run with different numbers of threads. Until KUnit supports
* passing arguments for each test case, we encode #threads in the test case
* name (read by get_num_threads()). [The '-' was chosen as a stylistic
@@ -880,6 +978,7 @@ static struct kunit_case kcsan_test_cases[] = {
KCSAN_KUNIT_CASE(test_write_write_struct_part),
KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
+ KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
KCSAN_KUNIT_CASE(test_zero_size_access),
KCSAN_KUNIT_CASE(test_data_race),
KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
@@ -891,6 +990,7 @@ static struct kunit_case kcsan_test_cases[] = {
KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
KCSAN_KUNIT_CASE(test_jiffies_noreport),
KCSAN_KUNIT_CASE(test_seqlock_noreport),
+ KCSAN_KUNIT_CASE(test_atomic_builtins),
{},
};
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 29480010dc30..8d4bf3431b3c 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -8,6 +8,7 @@
#ifndef _KERNEL_KCSAN_KCSAN_H
#define _KERNEL_KCSAN_KCSAN_H
+#include <linux/atomic.h>
#include <linux/kcsan.h>
#include <linux/sched.h>
@@ -34,6 +35,10 @@ void kcsan_restore_irqtrace(struct task_struct *task);
*/
void kcsan_debugfs_init(void);
+/*
+ * Statistics counters displayed via debugfs; should only be modified in
+ * slow-paths.
+ */
enum kcsan_counter_id {
/*
* Number of watchpoints currently in use.
@@ -86,12 +91,7 @@ enum kcsan_counter_id {
KCSAN_COUNTER_COUNT, /* number of counters */
};
-
-/*
- * Increment/decrement counter with given id; avoid calling these in fast-path.
- */
-extern void kcsan_counter_inc(enum kcsan_counter_id id);
-extern void kcsan_counter_dec(enum kcsan_counter_id id);
+extern atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
/*
* Returns true if data races in the function symbol that maps to func_addr
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 9d07e175de0f..d3bf87e6007c 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -228,6 +228,10 @@ static const char *get_access_type(int type)
return "write";
case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "write (marked)";
+ case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
+ return "read-write";
+ case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "read-write (marked)";
case KCSAN_ACCESS_SCOPED:
return "read (scoped)";
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
@@ -275,8 +279,8 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
cur = strnstr(buf, "kcsan_", len);
if (cur) {
- cur += sizeof("kcsan_") - 1;
- if (strncmp(cur, "test", sizeof("test") - 1))
+ cur += strlen("kcsan_");
+ if (!str_has_prefix(cur, "test"))
continue; /* KCSAN runtime function. */
/* KCSAN related test. */
}
@@ -555,7 +559,7 @@ static bool prepare_report_consumer(unsigned long *flags,
 * If the actual accesses do not match, this was a false
* positive due to watchpoint encoding.
*/
- kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES);
+ atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
goto discard;
}
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index d26a052d3383..d98bc208d06d 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "kcsan: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>
@@ -116,16 +118,16 @@ static int __init kcsan_selftest(void)
if (do_test()) \
++passed; \
else \
- pr_err("KCSAN selftest: " #do_test " failed"); \
+ pr_err("selftest: " #do_test " failed"); \
} while (0)
RUN_TEST(test_requires);
RUN_TEST(test_encode_decode);
RUN_TEST(test_matching_access);
- pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total);
+ pr_info("selftest: %d/%d tests passed\n", passed, total);
if (passed != total)
- panic("KCSAN selftests failed");
+ panic("selftests failed");
return 0;
}
postcore_initcall(kcsan_selftest);
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 3d282d51849b..f271ff5fbb5a 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -40,6 +40,11 @@ menuconfig KCSAN
if KCSAN
+# Compiler capabilities that should not fail the test if they are unavailable.
+config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+
config KCSAN_VERBOSE
bool "Show verbose reports with more information about system state"
depends on PROVE_LOCKING
diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
index c50f27b3ac56..c37f9518d5d9 100644
--- a/scripts/Makefile.kcsan
+++ b/scripts/Makefile.kcsan
@@ -11,5 +11,5 @@ endif
# of some options does not break KCSAN nor causes false positive reports.
CFLAGS_KCSAN := -fsanitize=thread \
$(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
- $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \
+ $(call cc-option,$(call cc-param,tsan-compound-read-before-write=1),$(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1))) \
$(call cc-param,tsan-distinguish-volatile=1)
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index 6afadf73da17..2b7fec7e6abc 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -5,9 +5,10 @@ ATOMICDIR=$(dirname $0)
. ${ATOMICDIR}/atomic-tbl.sh
-#gen_param_check(arg)
+#gen_param_check(meta, arg)
gen_param_check()
{
+ local meta="$1"; shift
local arg="$1"; shift
local type="${arg%%:*}"
local name="$(gen_param_name "${arg}")"
@@ -17,17 +18,25 @@ gen_param_check()
i) return;;
esac
- # We don't write to constant parameters
- [ ${type#c} != ${type} ] && rw="read"
+ if [ ${type#c} != ${type} ]; then
+ # We don't write to constant parameters.
+ rw="read"
+ elif [ "${meta}" != "s" ]; then
+ # An atomic RMW: if this parameter is not a constant, and this atomic is
+ # not just a 's'tore, this parameter is both read from and written to.
+ rw="read_write"
+ fi
printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
}
-#gen_param_check(arg...)
+#gen_params_checks(meta, arg...)
gen_params_checks()
{
+ local meta="$1"; shift
+
while [ "$#" -gt 0 ]; do
- gen_param_check "$1"
+ gen_param_check "$meta" "$1"
shift;
done
}
@@ -77,7 +86,7 @@ gen_proto_order_variant()
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
- local checks="$(gen_params_checks "$@")"
+ local checks="$(gen_params_checks "${meta}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
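
With the meta-aware gen_param_check() above, the wrappers the script generates into include/asm-generic/atomic-instrumented.h now distinguish plain stores from read-modify-writes. An illustrative excerpt of the generated output (a sketch of two representative ops, not the full generated file): atomic_set() keeps write-only instrumentation because its meta is a plain 's'tore, while atomic_fetch_or(), an RMW, gets the new read-write form:

static __always_inline void
atomic_set(atomic_t *v, int i)
{
	/* Store-only op: value is written, not read. */
	instrument_atomic_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
	/* RMW op: *v is both read from and written to. */
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_fetch_or(i, v);
}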
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 90a66891441a..3bd156d39747 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -528,6 +528,61 @@ static const char *uaccess_safe_builtin[] = {
"__tsan_write4",
"__tsan_write8",
"__tsan_write16",
+ "__tsan_read_write1",
+ "__tsan_read_write2",
+ "__tsan_read_write4",
+ "__tsan_read_write8",
+ "__tsan_read_write16",
+ "__tsan_atomic8_load",
+ "__tsan_atomic16_load",
+ "__tsan_atomic32_load",
+ "__tsan_atomic64_load",
+ "__tsan_atomic8_store",
+ "__tsan_atomic16_store",
+ "__tsan_atomic32_store",
+ "__tsan_atomic64_store",
+ "__tsan_atomic8_exchange",
+ "__tsan_atomic16_exchange",
+ "__tsan_atomic32_exchange",
+ "__tsan_atomic64_exchange",
+ "__tsan_atomic8_fetch_add",
+ "__tsan_atomic16_fetch_add",
+ "__tsan_atomic32_fetch_add",
+ "__tsan_atomic64_fetch_add",
+ "__tsan_atomic8_fetch_sub",
+ "__tsan_atomic16_fetch_sub",
+ "__tsan_atomic32_fetch_sub",
+ "__tsan_atomic64_fetch_sub",
+ "__tsan_atomic8_fetch_and",
+ "__tsan_atomic16_fetch_and",
+ "__tsan_atomic32_fetch_and",
+ "__tsan_atomic64_fetch_and",
+ "__tsan_atomic8_fetch_or",
+ "__tsan_atomic16_fetch_or",
+ "__tsan_atomic32_fetch_or",
+ "__tsan_atomic64_fetch_or",
+ "__tsan_atomic8_fetch_xor",
+ "__tsan_atomic16_fetch_xor",
+ "__tsan_atomic32_fetch_xor",
+ "__tsan_atomic64_fetch_xor",
+ "__tsan_atomic8_fetch_nand",
+ "__tsan_atomic16_fetch_nand",
+ "__tsan_atomic32_fetch_nand",
+ "__tsan_atomic64_fetch_nand",
+ "__tsan_atomic8_compare_exchange_strong",
+ "__tsan_atomic16_compare_exchange_strong",
+ "__tsan_atomic32_compare_exchange_strong",
+ "__tsan_atomic64_compare_exchange_strong",
+ "__tsan_atomic8_compare_exchange_weak",
+ "__tsan_atomic16_compare_exchange_weak",
+ "__tsan_atomic32_compare_exchange_weak",
+ "__tsan_atomic64_compare_exchange_weak",
+ "__tsan_atomic8_compare_exchange_val",
+ "__tsan_atomic16_compare_exchange_val",
+ "__tsan_atomic32_compare_exchange_val",
+ "__tsan_atomic64_compare_exchange_val",
+ "__tsan_atomic_thread_fence",
+ "__tsan_atomic_signal_fence",
/* KCOV */
"write_comp_data",
"check_kcov_mode",