Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/Makefile                                              |  8
-rw-r--r--  mm/kasan/common.c                                              | 62
-rw-r--r--  mm/kasan/kasan.h                                               |  6
-rw-r--r--  mm/kasan/kasan_test_c.c (renamed from mm/kasan/kasan_test.c)   | 57
-rw-r--r--  mm/kasan/kasan_test_rust.rs                                    | 21
5 files changed, 128 insertions, 26 deletions
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 7634dd2a6128..b88543e5c0cc 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -44,7 +44,8 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
CFLAGS_KASAN_TEST += -fno-builtin
endif

-CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST)
+RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)
CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)

obj-y := common.o report.o
@@ -52,5 +53,10 @@ obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quaran
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o

+kasan_test-objs := kasan_test_c.o
+ifdef CONFIG_RUST
+	kasan_test-objs += kasan_test_rust.o
+endif
+
obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 85e7c6b4575c..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -208,15 +208,12 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
	return (void *)object;
}

-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-				      unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+				  unsigned long ip)
{
-	void *tagged_object;
-
-	if (!kasan_arch_is_ready())
-		return false;
+	void *tagged_object = object;

-	tagged_object = object;
	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
@@ -224,37 +221,47 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
		return true;
	}

-	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return false;
-
	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

+	return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+				      bool init, bool still_accessible)
+{
+	void *tagged_object = object;
+
+	object = kasan_reset_tag(object);
+
+	/* RCU slabs could be legally used after free within the RCU period. */
+	if (unlikely(still_accessible))
+		return;
+
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
+}

-	return false;
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+				unsigned long ip)
+{
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
+		return false;
+	return check_slab_allocation(cache, object, ip);
}

-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-		       unsigned long ip, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+		       bool still_accessible)
{
-	if (is_kfence_address(object))
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;

-	/*
-	 * If the object is buggy, do not let slab put the object onto the
-	 * freelist. The object will thus never be allocated again and its
-	 * metadata will never get released.
-	 */
-	if (poison_slab_object(cache, object, ip, init))
-		return true;
+	poison_slab_object(cache, object, init, still_accessible);

	/*
	 * If the object is put into quarantine, do not let slab put the object
@@ -504,11 +511,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
		return true;
	}

-	if (is_kfence_address(ptr))
-		return false;
+	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+		return true;

	slab = folio_slab(folio);
-	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+	if (check_slab_allocation(slab->slab_cache, ptr, ip))
+		return false;
+
+	poison_slab_object(slab->slab_cache, ptr, false, false);
+	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
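
Taken together, the common.c changes split the old poison_slab_object() into a checking half (check_slab_allocation()) and a poisoning half, so the allocator can reject a bad free before it reinitializes the object, and can defer poisoning of SLAB_TYPESAFE_BY_RCU objects until they become genuinely unreachable. Below is a minimal sketch of the intended calling sequence, assuming the hooks behave as described in the hunks above; example_slab_free(), schedule_rcu_free() and free_object_now() are hypothetical illustrations, not the real SLUB free path:

#include <linux/kasan.h>
#include <linux/slab.h>

/* Hypothetical allocator internals, assumed only for this sketch. */
static void schedule_rcu_free(struct kmem_cache *cache, void *object);
static void free_object_now(struct kmem_cache *cache, void *object);

/* Sketch only: how a slab free path might drive the split KASAN hooks. */
static void example_slab_free(struct kmem_cache *cache, void *object,
			      bool rcu_delayed)
{
	/*
	 * Check first: on a double-free or invalid free this reports and
	 * returns true, and the object is deliberately leaked so that its
	 * metadata survives for the report.
	 */
	if (__kasan_slab_pre_free(cache, object, _RET_IP_))
		return;

	/*
	 * Poison second. With still_accessible == true (the
	 * SLAB_TYPESAFE_BY_RCU case) poisoning is skipped, since readers
	 * may legally touch the object until the grace period ends. A
	 * true return means the object went into the KASAN quarantine
	 * and must not reach the freelist yet.
	 */
	if (__kasan_slab_free(cache, object, false, rcu_delayed))
		return;

	if (rcu_delayed)
		schedule_rcu_free(cache, object);	/* hypothetical helper */
	else
		free_object_now(cache, object);		/* hypothetical helper */
}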
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb2b9ac0659a..f438a6cdc964 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);

+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
#else /* CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_kunit_test_suite_start(void) { }
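
The #ifdef block above uses the standard kernel stub pattern: with CONFIG_RUST disabled, callers still compile and link, and the unsupported configuration is handled at runtime (the KUnit test added below skips itself via KASAN_TEST_NEEDS_CONFIG_ON rather than via conditional compilation). The generic shape of the pattern, with hypothetical names, looks like this:

/* Illustrative only: the stub pattern the kasan.h hunk follows. */
#ifdef CONFIG_FOO
int foo_do_thing(void);		/* real symbol provided elsewhere */
#else
static inline int foo_do_thing(void) { return 0; }	/* folds away at compile time */
#endif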
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c
index 7b32be2a3cf0..a181e4780d9d 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test_c.c
@@ -996,6 +996,51 @@ static void kmem_cache_invalid_free(struct kunit *test)
	kmem_cache_destroy(cache);
}

+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+	*p = 1;
+
+	rcu_read_lock();
+
+	/* Free the object - this will internally schedule an RCU callback. */
+	kmem_cache_free(cache, p);
+
+	/*
+	 * We should still be allowed to access the object at this point because
+	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+	 * critical section since before the kmem_cache_free().
+	 */
+	READ_ONCE(*p);
+
+	rcu_read_unlock();
+
+	/*
+	 * Wait for the RCU callback to execute; after this, the object should
+	 * have actually been freed from KASAN's perspective.
+	 */
+	rcu_barrier();
+
+	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+	kmem_cache_destroy(cache);
+}
+
static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
@@ -1899,6 +1944,16 @@ static void match_all_mem_tag(struct kunit *test)
	kfree(ptr);
}

+/*
+ * Check that Rust performing a use-after-free using `unsafe` is detected.
+ * This is a smoke test to make sure that Rust is being sanitized properly.
+ */
+static void rust_uaf(struct kunit *test)
+{
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
@@ -1937,6 +1992,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
@@ -1971,6 +2027,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
+	KUNIT_CASE(rust_uaf),
	{}
};
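
The kmem_cache_rcu_uaf() test added above probes exactly the boundary SLAB_TYPESAFE_BY_RCU defines: reads from an RCU read-side critical section that began before kmem_cache_free() are legal, while reads after the grace period are a real use-after-free, which CONFIG_SLUB_RCU_DEBUG now lets KASAN catch. For context, here is a sketch of the legitimate lookup-and-revalidate pattern such caches exist to support; struct item and its fields are hypothetical, not kernel types:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

/* Hypothetical object kept in a SLAB_TYPESAFE_BY_RCU cache. */
struct item {
	spinlock_t lock;
	int key;		/* only meaningful while the item is live */
	refcount_t ref;
};

/* Validate a candidate found in some RCU-protected structure. */
static struct item *item_lookup_validate(struct item *candidate, int key)
{
	struct item *found = NULL;

	rcu_read_lock();
	/*
	 * The object may already have been freed and even reallocated,
	 * but SLAB_TYPESAFE_BY_RCU guarantees the memory still holds a
	 * 'struct item' during this read-side section, so locking it and
	 * rechecking its identity is safe.
	 */
	spin_lock(&candidate->lock);
	if (candidate->key == key && refcount_inc_not_zero(&candidate->ref))
		found = candidate;
	spin_unlock(&candidate->lock);
	rcu_read_unlock();

	return found;
}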
diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
new file mode 100644
index 000000000000..caa7175964ef
--- /dev/null
+++ b/mm/kasan/kasan_test_rust.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Helper crate for KASAN testing.
+//!
+//! Provides behavior to check the sanitization of Rust code.
+
+use core::ptr::addr_of_mut;
+use kernel::prelude::*;
+
+/// Trivial UAF - allocate a big vector, grab a pointer partway through,
+/// drop the vector, and touch it.
+#[no_mangle]
+pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+    let mut v: Vec<u8> = Vec::new();
+    for _ in 0..4096 {
+        v.push(0x42, GFP_KERNEL).unwrap();
+    }
+    let ptr: *mut u8 = addr_of_mut!(v[2048]);
+    drop(v);
+    unsafe { *ptr }
+}