Diffstat (limited to 'mm/kasan/common.c')
 mm/kasan/common.c | 280 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 194 insertions(+), 86 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 5d95219e69d7..610efae91220 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -20,8 +20,10 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -37,19 +39,35 @@ struct slab *kasan_addr_to_slab(const void *addr)
return NULL;
}
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+ return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}
-void kasan_set_track(struct kasan_track *track, gfp_t flags)
+void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u32 cpu = raw_smp_processor_id();
+ u64 ts_nsec = local_clock();
+
+ track->cpu = cpu;
+ track->timestamp = ts_nsec >> 3;
+#endif /* CONFIG_KASAN_EXTRA_INFO */
track->pid = current->pid;
- track->stack = kasan_save_stack(flags, true);
+ track->stack = stack;
+}
+
+void kasan_save_track(struct kasan_track *track, gfp_t flags)
+{
+ depot_stack_handle_t stack;
+
+ stack = kasan_save_stack(flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ kasan_set_track(track, stack);
}
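
Note on the flag change above: kasan_save_stack() now takes stack depot flags instead of a can_alloc bool, and kasan_save_track() passes STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET, so every saved handle pins a reference in the (now evictable) stack depot. A minimal sketch of the pairing this implies, assuming the KASAN-internal struct kasan_track from mm/kasan/kasan.h; the helper name is hypothetical, and the real release paths in this series live elsewhere in KASAN (e.g. when object metadata is dropped):

#include <linux/stackdepot.h>

/* Hypothetical helper: drop the depot reference a track took via STACK_DEPOT_FLAG_GET. */
static void example_release_track(struct kasan_track *track)
{
	if (track->stack)
		stack_depot_put(track->stack);	/* pairs with the _GET in kasan_save_stack() */
	track->stack = 0;
}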
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
@@ -69,6 +87,9 @@ EXPORT_SYMBOL(kasan_disable_current);
void __kasan_unpoison_range(const void *address, size_t size)
{
+ if (is_kfence_address(address))
+ return;
+
kasan_unpoison(address, size, false);
}
@@ -133,12 +154,12 @@ void __kasan_poison_slab(struct slab *slab)
KASAN_SLAB_REDZONE, false);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
kasan_unpoison(object, cache->object_size, false);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_REDZONE, false);
@@ -188,8 +209,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
- unsigned long ip, bool quarantine, bool init)
+static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
+ unsigned long ip, bool init)
{
void *tagged_object;
@@ -199,16 +220,12 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
tagged_object = object;
object = kasan_reset_tag(object);
- if (is_kfence_address(object))
- return false;
-
- if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
- object)) {
+ if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
return true;
}
- /* RCU slabs could be legally used after free within the RCU period */
+ /* RCU slabs could be legally used after free within the RCU period. */
if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;
@@ -220,22 +237,45 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_FREE, init);
- if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
- return false;
-
if (kasan_stack_collection_enabled())
kasan_save_free_info(cache, tagged_object);
- return kasan_quarantine_put(cache, object);
+ return false;
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool init)
{
- return ____kasan_slab_free(cache, object, ip, true, init);
+ if (is_kfence_address(object))
+ return false;
+
+ /*
+ * If the object is buggy, do not let slab put the object onto the
+ * freelist. The object will thus never be allocated again and its
+ * metadata will never get released.
+ */
+ if (poison_slab_object(cache, object, ip, init))
+ return true;
+
+ /*
+ * If the object is put into quarantine, do not let slab put the object
+ * onto the freelist for now. The object's metadata is kept until the
+ * object gets evicted from quarantine.
+ */
+ if (kasan_quarantine_put(cache, object))
+ return true;
+
+ /*
+ * If the object is not put into quarantine, it will likely be quickly
+ * reallocated. Thus, release its metadata now.
+ */
+ kasan_release_object_meta(cache, object);
+
+ /* Let slab put the object onto the freelist. */
+ return false;
}
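
The rewritten __kasan_slab_free() above makes the caller contract explicit: a true return means KASAN kept the object (invalid free or quarantine) and slab must not put it on the freelist. A hedged sketch of how a free path consumes that contract through the public kasan_slab_free() wrapper; the hook below is illustrative, not the actual mm/slub.c code:

static __always_inline bool example_free_hook(struct kmem_cache *s, void *object, bool init)
{
	/*
	 * kasan_slab_free() returns true when KASAN takes ownership of the
	 * object (buggy free or quarantined); the caller must then skip it.
	 */
	if (kasan_slab_free(s, object, init))
		return false;	/* do not put the object on the freelist */

	return true;		/* safe to free as usual */
}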
-static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
if (!kasan_arch_is_ready())
return false;
@@ -250,40 +290,28 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
return true;
}
- /*
- * The object will be poisoned by kasan_poison_pages() or
- * kasan_slab_free_mempool().
- */
-
return false;
}
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
- ____kasan_kfree_large(ptr, ip);
+ check_page_allocation(ptr, ip);
+
+ /* The object will be poisoned by kasan_poison_pages(). */
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
+ gfp_t flags, bool init)
{
- struct folio *folio;
-
- folio = virt_to_folio(ptr);
-
/*
- * Even though this function is only called for kmem_cache_alloc and
- * kmalloc backed mempool allocations, those allocations can still be
- * !PageSlab() when the size provided to kmalloc is larger than
- * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ * Unpoison the whole object. For kmalloc() allocations,
+ * poison_kmalloc_redzone() will do precise poisoning.
*/
- if (unlikely(!folio_test_slab(folio))) {
- if (____kasan_kfree_large(ptr, ip))
- return;
- kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
- } else {
- struct slab *slab = folio_slab(folio);
+ kasan_unpoison(object, cache->object_size, init);
- ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
- }
+ /* Save alloc info (if possible) for non-kmalloc() allocations. */
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
+ kasan_save_alloc_info(cache, object, flags);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
@@ -308,39 +336,18 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
tag = assign_tag(cache, object, false);
tagged_object = set_tag(object, tag);
- /*
- * Unpoison the whole object.
- * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
- */
- kasan_unpoison(tagged_object, cache->object_size, init);
-
- /* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
- kasan_save_alloc_info(cache, tagged_object, flags);
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(cache, tagged_object, flags, init);
return tagged_object;
}
-static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
const void *object, size_t size, gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(object == NULL))
- return NULL;
-
- if (is_kfence_address(kasan_reset_tag(object)))
- return (void *)object;
-
- /*
- * The object has already been unpoisoned by kasan_slab_alloc() for
- * kmalloc() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -364,34 +371,34 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);
- /* Keep the tag that was set by kasan_slab_alloc(). */
- return (void *)object;
}
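
For reference, the redzone arithmetic that poison_kmalloc_redzone() keeps from the old ____kasan_kmalloc(): with the generic mode's 8-byte granules, a kmalloc(13) object in a 32-byte slot gets a byte-precise partial granule plus a granule-aligned redzone. Rough numbers, assumed only for illustration:

/*
 * Illustration only, assuming KASAN_GRANULE_SIZE == 8 (generic mode) and a
 * 32-byte cache->object_size:
 *
 *   requested size                 = 13
 *   kasan_poison_last_granule()      poisons bytes 13..15 at byte precision
 *   redzone_start = round_up(object + 13, 8) = object + 16
 *   redzone_end   = round_up(object + 32, 8) = object + 32
 *   kasan_poison() marks object+16 .. object+32 as KASAN_SLAB_REDZONE
 */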
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
size_t size, gfp_t flags)
{
- return ____kasan_kmalloc(cache, object, size, flags);
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(object == NULL))
+ return NULL;
+
+ if (is_kfence_address(object))
+ return (void *)object;
+
+ /* The object has already been unpoisoned by kasan_slab_alloc(). */
+ poison_kmalloc_redzone(cache, object, size, flags);
+
+ /* Keep the tag that was set by kasan_slab_alloc(). */
+ return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(ptr == NULL))
- return NULL;
-
- /*
- * The object has already been unpoisoned by kasan_unpoison_pages() for
- * alloc_pages() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -401,12 +408,25 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
kasan_poison_last_granule(ptr, size);
/* Poison the aligned part of the redzone. */
- redzone_start = round_up((unsigned long)(ptr + size),
- KASAN_GRANULE_SIZE);
+ redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE, false);
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(ptr == NULL))
+ return NULL;
+ /* The object has already been unpoisoned by kasan_unpoison_pages(). */
+ poison_kmalloc_large_redzone(ptr, size, flags);
+
+ /* Keep the tag that was set by alloc_pages(). */
return (void *)ptr;
}
@@ -414,9 +434,15 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
{
struct slab *slab;
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
if (unlikely(object == ZERO_SIZE_PTR))
return (void *)object;
+ if (is_kfence_address(object))
+ return (void *)object;
+
/*
* Unpoison the object's data.
* Part of it might already have been unpoisoned, but it's unknown
@@ -428,9 +454,91 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
if (unlikely(!slab))
- return __kasan_kmalloc_large(object, size, flags);
+ poison_kmalloc_large_redzone(object, size, flags);
else
- return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
+ poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
+
+ return (void *)object;
+}
+
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ unsigned long *ptr;
+
+ if (unlikely(PageHighMem(page)))
+ return true;
+
+ /* Bail out if allocation was excluded due to sampling. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ page_kasan_tag(page) == KASAN_TAG_KERNEL)
+ return true;
+
+ ptr = page_address(page);
+
+ if (check_page_allocation(ptr, ip))
+ return false;
+
+ kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
+
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ __kasan_unpoison_pages(page, order, false);
+}
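
The two page-order hooks added above are meant for pools that cache raw page allocations: poison when a page goes into the pool, unpoison when it comes back out. A usage sketch through the public kasan_mempool_{poison,unpoison}_pages() wrappers (which record the caller ip internally); the pool itself is hypothetical:

#include <linux/kasan.h>
#include <linux/mm.h>

/* Hypothetical cache of order-0 pages kept outside the page allocator. */
static bool example_stash_page(struct page *page)
{
	/* A false return means KASAN detected a bug (e.g. a double free). */
	if (!kasan_mempool_poison_pages(page, 0))
		return false;
	/* ... add the page to the pool ... */
	return true;
}

static void example_take_page(struct page *page)
{
	/* Re-unpoison before handing the page back to a user. */
	kasan_mempool_unpoison_pages(page, 0);
}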
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+ struct folio *folio = virt_to_folio(ptr);
+ struct slab *slab;
+
+ /*
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc. Thus, the folio might not be a slab.
+ */
+ if (unlikely(!folio_test_slab(folio))) {
+ if (check_page_allocation(ptr, ip))
+ return false;
+ kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+ return true;
+ }
+
+ if (is_kfence_address(ptr))
+ return false;
+
+ slab = folio_slab(folio);
+ return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
+{
+ struct slab *slab;
+ gfp_t flags = 0; /* Might be executing under a lock. */
+
+ slab = virt_to_slab(ptr);
+
+ /*
+	 * This function can be called for large kmalloc allocations that get
+	 * their memory from page_alloc.
+ */
+ if (unlikely(!slab)) {
+ kasan_unpoison(ptr, size, false);
+ poison_kmalloc_large_redzone(ptr, size, flags);
+ return;
+ }
+
+ if (is_kfence_address(ptr))
+ return;
+
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+
+ /* Poison the redzone and save alloc info for kmalloc() allocations. */
+ if (is_kmalloc_cache(slab->slab_cache))
+ poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}
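
The object-level pair mirrors the page hooks: poison an element on the way into a pool, unpoison it on the way out, with the kmalloc redzone and alloc info restored for kmalloc caches. A minimal usage sketch via the public wrappers rather than the __kasan_ internals; the element cache is made up for illustration:

#include <linux/kasan.h>

/* Hypothetical pool of kmalloc()-backed elements. */
static bool example_stash_element(void *element)
{
	/* False means the free was buggy; do not recycle the element. */
	return kasan_mempool_poison_object(element);
}

static void example_take_element(void *element, size_t size)
{
	/* Unpoison the element and restore its kmalloc redzone before reuse. */
	kasan_mempool_unpoison_object(element, size);
}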
bool __kasan_check_byte(const void *address, unsigned long ip)