path: root/kernel/kcsan/core.c
author     Marco Elver <elver@google.com>          2020-02-25 15:32:58 +0100
committer  Paul E. McKenney <paulmck@kernel.org>   2020-03-25 09:56:00 -0700
commit     44656d3dc4f0dc20010d054f27397a4a1469fabf (patch)
tree       3cc0c8938304bb29992524788e001270d1afb4ca /kernel/kcsan/core.c
parent     2402d0eae589a31ee7b1774cb220d84d0f5605b4 (diff)
kcsan: Add current->state to implicitly atomic accesses
Add volatile current->state to list of implicitly atomic accesses. This is in preparation to eventually enable KCSAN on kernel/sched (which currently still has KCSAN_SANITIZE := n).

Since accesses that match the special check in atomic.h are rare, it makes more sense to move this check to the slow-path, avoiding the additional compare in the fast-path. With the microbenchmark, a speedup of ~6% is measured.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'kernel/kcsan/core.c')
-rw-r--r--  kernel/kcsan/core.c  22
1 file changed, 15 insertions, 7 deletions
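Note that the diffstat above is limited to kernel/kcsan/core.c, so the companion hunk in kernel/kcsan/atomic.h, where the fast-path helper kcsan_is_atomic() becomes the slow-path helper kcsan_is_atomic_special() and gains the current->state check, is not shown. The following is only a minimal sketch of what that helper plausibly looks like after this patch, reconstructed from the commit message rather than from an included hunk:

/* Sketch of kernel/kcsan/atomic.h after this patch; the actual hunk is not part of this diffstat. */
#include <linux/jiffies.h>
#include <linux/sched.h>

/*
 * Special rules for certain memory where (racy) accesses are always treated
 * as atomic. Checked only from the slow-path, since matches are rare.
 */
static __always_inline bool kcsan_is_atomic_special(const volatile void *ptr)
{
        /* jiffies was already treated as atomic; current->state is what this patch adds. */
        return ptr == &jiffies || ptr == &current->state;
}

Keeping these pointer compares out of is_atomic() means the common instrumented access never pays for them; they move to kcsan_setup_watchpoint(), which is only entered for the small fraction of accesses selected for watchpointing, as the core.c diff below shows.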
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 065615df88ea..eb30ecdc8c00 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -188,12 +188,13 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
+/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type)
{
struct kcsan_ctx *ctx;
- if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+ if (type & KCSAN_ACCESS_ATOMIC)
return true;
/*
@@ -201,16 +202,16 @@ is_atomic(const volatile void *ptr, size_t size, int type)
* as atomic. This allows using them also in atomic regions, such as
* seqlocks, without implicitly changing their semantics.
*/
- if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ if (type & KCSAN_ACCESS_ASSERT)
return false;
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
- (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+ (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
IS_ALIGNED((unsigned long)ptr, size))
return true; /* Assume aligned writes up to word size are atomic. */
ctx = get_ctx();
- if (unlikely(ctx->atomic_next > 0)) {
+ if (ctx->atomic_next > 0) {
/*
* Because we do not have separate contexts for nested
* interrupts, in case atomic_next is set, we simply assume that
@@ -224,10 +225,8 @@ is_atomic(const volatile void *ptr, size_t size, int type)
--ctx->atomic_next; /* in task, or outer interrupt */
return true;
}
- if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
- return true;
- return kcsan_is_atomic(ptr);
+ return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
static __always_inline bool
@@ -367,6 +366,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (!kcsan_is_enabled())
goto out;
+ /*
+ * Special atomic rules: unlikely to be true, so we check them here in
+ * the slow-path, and not in the fast-path in is_atomic(). Call after
+ * kcsan_is_enabled(), as we may access memory that is not yet
+ * initialized during early boot.
+ */
+ if (!is_assert && kcsan_is_atomic_special(ptr))
+ goto out;
+
if (!check_encodable((unsigned long)ptr, size)) {
kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
goto out;