From 022012dcf44209074af97b6ae531a10c08736b31 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:13 +0100 Subject: lib/stackdepot, kasan: add flags to __stack_depot_save and rename Change the bool can_alloc argument of __stack_depot_save to a u32 argument that accepts a set of flags. The following patch will add another flag to stack_depot_save_flags besides the existing STACK_DEPOT_FLAG_CAN_ALLOC. Also rename the function to stack_depot_save_flags, as __stack_depot_save is a cryptic name, Link: https://lkml.kernel.org/r/645fa15239621eebbd3a10331e5864b718839512.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/common.c | 7 ++++--- mm/kasan/generic.c | 9 +++++---- mm/kasan/kasan.h | 2 +- mm/kasan/tags.c | 3 ++- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 256930da578a..825a0240ec02 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -37,19 +38,19 @@ struct slab *kasan_addr_to_slab(const void *addr) return NULL; } -depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc) +depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) { unsigned long entries[KASAN_STACK_DEPTH]; unsigned int nr_entries; nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); - return __stack_depot_save(entries, nr_entries, flags, can_alloc); + return stack_depot_save_flags(entries, nr_entries, flags, depot_flags); } void kasan_set_track(struct kasan_track *track, gfp_t flags) { track->pid = current->pid; - track->stack = kasan_save_stack(flags, true); + track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC); } #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 4d837ab83f08..5d168c9afb32 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -472,7 +473,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) sizeof(struct kasan_free_meta) : 0); } -static void __kasan_record_aux_stack(void *addr, bool can_alloc) +static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) { struct slab *slab = kasan_addr_to_slab(addr); struct kmem_cache *cache; @@ -489,17 +490,17 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc) return; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc); + alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); } void kasan_record_aux_stack(void *addr) { - return __kasan_record_aux_stack(addr, true); + return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC); } void kasan_record_aux_stack_noalloc(void *addr) { - return __kasan_record_aux_stack(addr, false); + return __kasan_record_aux_stack(addr, 0); } void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 8b06bab5c406..b29d46b83d1f 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -368,7 +368,7 @@ static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int static inline void kasan_init_object_meta(struct kmem_cache *cache, const 
void *object) { } #endif -depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc); +depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); void kasan_set_track(struct kasan_track *track, gfp_t flags); void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags); void kasan_save_free_info(struct kmem_cache *cache, void *object); diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 7dcfe341d48e..4fd32121b0fd 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -101,7 +102,7 @@ static void save_stack_info(struct kmem_cache *cache, void *object, struct kasan_stack_ring_entry *entry; void *old_ptr; - stack = kasan_save_stack(gfp_flags, true); + stack = kasan_save_stack(gfp_flags, STACK_DEPOT_FLAG_CAN_ALLOC); /* * Prevent save_stack_info() from modifying stack ring -- cgit v1.2.3 From f3b5979862994089005d48ad2ce5b6a9735981fe Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:16 +0100 Subject: kasan: remove atomic accesses to stack ring entries Remove the atomic accesses to entry fields in save_stack_info and kasan_complete_mode_report_info for tag-based KASAN modes. These atomics are not required, as the read/write lock prevents the entries from being read (in kasan_complete_mode_report_info) while being written (in save_stack_info) and the try_cmpxchg prevents the same entry from being rewritten (in save_stack_info) in the unlikely case of wrapping during writing. Link: https://lkml.kernel.org/r/29f59126d9845c5257b6c29cd7ad113b16f19f47.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/report_tags.c | 25 +++++++------------------ mm/kasan/tags.c | 13 +++++-------- 2 files changed, 12 insertions(+), 26 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 8b8bfdb3cfdb..78abdcde5da9 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -31,10 +31,6 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) unsigned long flags; u64 pos; struct kasan_stack_ring_entry *entry; - void *ptr; - u32 pid; - depot_stack_handle_t stack; - bool is_free; bool alloc_found = false, free_found = false; if ((!info->cache || !info->object) && !info->bug_type) { @@ -61,18 +57,11 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) entry = &stack_ring.entries[i % stack_ring.size]; - /* Paired with smp_store_release() in save_stack_info(). */ - ptr = (void *)smp_load_acquire(&entry->ptr); - - if (kasan_reset_tag(ptr) != info->object || - get_tag(ptr) != get_tag(info->access_addr)) + if (kasan_reset_tag(entry->ptr) != info->object || + get_tag(entry->ptr) != get_tag(info->access_addr)) continue; - pid = READ_ONCE(entry->pid); - stack = READ_ONCE(entry->stack); - is_free = READ_ONCE(entry->is_free); - - if (is_free) { + if (entry->is_free) { /* * Second free of the same object. * Give up on trying to find the alloc entry. 
@@ -80,8 +69,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (free_found) break; - info->free_track.pid = pid; - info->free_track.stack = stack; + info->free_track.pid = entry->pid; + info->free_track.stack = entry->stack; free_found = true; /* @@ -95,8 +84,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (alloc_found) break; - info->alloc_track.pid = pid; - info->alloc_track.stack = stack; + info->alloc_track.pid = entry->pid; + info->alloc_track.stack = entry->stack; alloc_found = true; /* diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 4fd32121b0fd..b6c017e670d8 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -121,15 +121,12 @@ next: if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR)) goto next; /* Busy slot. */ - WRITE_ONCE(entry->size, cache->object_size); - WRITE_ONCE(entry->pid, current->pid); - WRITE_ONCE(entry->stack, stack); - WRITE_ONCE(entry->is_free, is_free); + entry->size = cache->object_size; + entry->pid = current->pid; + entry->stack = stack; + entry->is_free = is_free; - /* - * Paired with smp_load_acquire() in kasan_complete_mode_report_info(). - */ - smp_store_release(&entry->ptr, (s64)object); + entry->ptr = object; read_unlock_irqrestore(&stack_ring.lock, flags); } -- cgit v1.2.3 From 7d88e4f768b0fdb85b68f0e4679bb10fdb05c808 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:17 +0100 Subject: kasan: check object_size in kasan_complete_mode_report_info Check the object size when looking up entries in the stack ring. If the size of the object for which a report is being printed does not match the size of the object for which a stack trace has been saved in the stack ring, the saved stack trace is irrelevant. Link: https://lkml.kernel.org/r/68c6948175aadd7e7e7deea61725103d64a4528f.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/report_tags.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm/kasan') diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 78abdcde5da9..55154743f915 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -7,6 +7,7 @@ #include #include "kasan.h" +#include "../slab.h" extern struct kasan_stack_ring stack_ring; @@ -58,7 +59,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) entry = &stack_ring.entries[i % stack_ring.size]; if (kasan_reset_tag(entry->ptr) != info->object || - get_tag(entry->ptr) != get_tag(info->access_addr)) + get_tag(entry->ptr) != get_tag(info->access_addr) || + info->cache->object_size != entry->size) continue; if (entry->is_free) { -- cgit v1.2.3 From f816938bff1f772ce7949e5747734be27ecf7f4d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:18 +0100 Subject: kasan: use stack_depot_put for tag-based modes Make tag-based KASAN modes evict stack traces from the stack depot once they are evicted from the stack ring. Internally, pass STACK_DEPOT_FLAG_GET to stack_depot_save_flags (via kasan_save_stack) to increment the refcount when saving a new entry to stack ring and call stack_depot_put when removing an entry from stack ring. 
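In outline (condensed from the save_stack_info() hunks below; the ring locking and busy-slot handling are omitted), the get/put pairing looks like this:

	/* Take a reference on the depot record when saving into the ring. */
	stack = kasan_save_stack(gfp_flags,
				 STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
	...
	old_stack = entry->stack;	/* handle displaced from the ring slot */
	entry->stack = stack;
	...
	/* Drop the reference held by the displaced entry. */
	if (old_stack)
		stack_depot_put(old_stack);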
Link: https://lkml.kernel.org/r/b4773e5c1b0b9df6826ec0b65c1923feadfa78e5.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/tags.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index b6c017e670d8..739ae997463d 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -97,12 +97,13 @@ static void save_stack_info(struct kmem_cache *cache, void *object, gfp_t gfp_flags, bool is_free) { unsigned long flags; - depot_stack_handle_t stack; + depot_stack_handle_t stack, old_stack; u64 pos; struct kasan_stack_ring_entry *entry; void *old_ptr; - stack = kasan_save_stack(gfp_flags, STACK_DEPOT_FLAG_CAN_ALLOC); + stack = kasan_save_stack(gfp_flags, + STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); /* * Prevent save_stack_info() from modifying stack ring @@ -121,6 +122,8 @@ next: if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR)) goto next; /* Busy slot. */ + old_stack = entry->stack; + entry->size = cache->object_size; entry->pid = current->pid; entry->stack = stack; @@ -129,6 +132,9 @@ next: entry->ptr = object; read_unlock_irqrestore(&stack_ring.lock, flags); + + if (old_stack) + stack_depot_put(old_stack); } void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) -- cgit v1.2.3 From 773688a6cb24b0b3c2ba40354d883348a2befa38 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 20 Nov 2023 18:47:19 +0100 Subject: kasan: use stack_depot_put for Generic mode Evict alloc/free stack traces from the stack depot for Generic KASAN once they are evicted from the quarantine. For auxiliary stack traces, evict the oldest stack trace once a new one is saved (KASAN only keeps references to the last two). Also evict all saved stack traces on krealloc. To avoid double-evicting and mis-evicting stack traces (in case KASAN's metadata was corrupted), reset KASAN's per-object metadata that stores stack depot handles when the object is initialized and when it's evicted from the quarantine. Note that stack_depot_put is a no-op if the handle is 0. 
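For the auxiliary stack traces, the rotation described above reduces to the following (condensed from the __kasan_record_aux_stack() hunk below; only the last two handles are kept, and putting a zero handle is harmless):

	/* Drop the reference to the oldest aux stack trace before rotating. */
	stack_depot_put(alloc_meta->aux_stack[1]);
	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);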
Link: https://lkml.kernel.org/r/5cef104d9b842899489b4054fe8d1339a71acee0.1700502145.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/common.c | 3 ++- mm/kasan/generic.c | 22 ++++++++++++++++++---- mm/kasan/quarantine.c | 26 ++++++++++++++++++++------ 3 files changed, 40 insertions(+), 11 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 825a0240ec02..b5d8bd26fced 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -50,7 +50,8 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) void kasan_set_track(struct kasan_track *track, gfp_t flags) { track->pid = current->pid; - track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC); + track->stack = kasan_save_stack(flags, + STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); } #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 5d168c9afb32..50cc519e23f4 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -449,10 +449,14 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { struct kasan_alloc_meta *alloc_meta; + struct kasan_free_meta *free_meta; alloc_meta = kasan_get_alloc_meta(cache, object); if (alloc_meta) __memset(alloc_meta, 0, sizeof(*alloc_meta)); + free_meta = kasan_get_free_meta(cache, object); + if (free_meta) + __memset(free_meta, 0, sizeof(*free_meta)); } size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) @@ -489,18 +493,20 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) if (!alloc_meta) return; + stack_depot_put(alloc_meta->aux_stack[1]); alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); } void kasan_record_aux_stack(void *addr) { - return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC); + return __kasan_record_aux_stack(addr, + STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); } void kasan_record_aux_stack_noalloc(void *addr) { - return __kasan_record_aux_stack(addr, 0); + return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET); } void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) @@ -508,8 +514,16 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) struct kasan_alloc_meta *alloc_meta; alloc_meta = kasan_get_alloc_meta(cache, object); - if (alloc_meta) - kasan_set_track(&alloc_meta->alloc_track, flags); + if (!alloc_meta) + return; + + /* Evict previous stack traces (might exist for krealloc). 
*/ + stack_depot_put(alloc_meta->alloc_track.stack); + stack_depot_put(alloc_meta->aux_stack[0]); + stack_depot_put(alloc_meta->aux_stack[1]); + __memset(alloc_meta, 0, sizeof(*alloc_meta)); + + kasan_set_track(&alloc_meta->alloc_track, flags); } void kasan_save_free_info(struct kmem_cache *cache, void *object) diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index ca4529156735..265ca2bbe2dd 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -143,11 +143,22 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) { void *object = qlink_to_object(qlink, cache); - struct kasan_free_meta *meta = kasan_get_free_meta(cache, object); + struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object); + struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object); unsigned long flags; - if (IS_ENABLED(CONFIG_SLAB)) - local_irq_save(flags); + if (alloc_meta) { + stack_depot_put(alloc_meta->alloc_track.stack); + stack_depot_put(alloc_meta->aux_stack[0]); + stack_depot_put(alloc_meta->aux_stack[1]); + __memset(alloc_meta, 0, sizeof(*alloc_meta)); + } + + if (free_meta && + *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) { + stack_depot_put(free_meta->free_track.stack); + free_meta->free_track.stack = 0; + } /* * If init_on_free is enabled and KASAN's free metadata is stored in @@ -157,14 +168,17 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) */ if (slab_want_init_on_free(cache) && cache->kasan_info.free_meta_offset == 0) - memzero_explicit(meta, sizeof(*meta)); + memzero_explicit(free_meta, sizeof(*free_meta)); /* - * As the object now gets freed from the quarantine, assume that its - * free track is no longer valid. + * As the object now gets freed from the quarantine, + * take note that its free track is no longer exists. */ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; + if (IS_ENABLED(CONFIG_SLAB)) + local_irq_save(flags); + ___cache_free(cache, object, _THIS_IP_); if (IS_ENABLED(CONFIG_SLAB)) -- cgit v1.2.3 From a5989d4ed40cef0cadede2393c714a1ff9179f65 Mon Sep 17 00:00:00 2001 From: Juntong Deng Date: Mon, 20 Nov 2023 04:46:29 +0800 Subject: kasan: improve free meta storage in Generic KASAN Currently free meta can only be stored in object if the object is not smaller than free meta. After the improvement, when the object is smaller than free meta and SLUB DEBUG is not enabled, it is possible to store part of the free meta in the object, reducing the increased size of the red zone. 
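A condensed sketch of the new sizing logic for that case, taken from the kasan_cache_create() hunk below (variable names as in the patch); the numeric example that follows works through the same computation:

	/*
	 * Only the part of free meta that does not fit inside the object is
	 * added to the redzone; alloc meta, if present, is shifted past it.
	 */
	rem_free_meta_size = sizeof(struct kasan_free_meta) - cache->object_size;
	*size += rem_free_meta_size;
	if (cache->kasan_info.alloc_meta_offset != 0)
		cache->kasan_info.alloc_meta_offset += rem_free_meta_size;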
Example: free meta size: 16 bytes alloc meta size: 16 bytes object size: 8 bytes optimal redzone size (object_size <= 64): 16 bytes Before improvement: actual redzone size = alloc meta size + free meta size = 32 bytes After improvement: actual redzone size = alloc meta size + (free meta size - object size) = 24 bytes [juntong.deng@outlook.com: make kasan_metadata_size() adapt to the improved free meta storage] Link: https://lkml.kernel.org/r/VI1P193MB0752675D6E0A2D16CE656F8299BAA@VI1P193MB0752.EURP193.PROD.OUTLOOK.COM Link: https://lkml.kernel.org/r/VI1P193MB0752DE2CCD9046B5FED0AA8E99B5A@VI1P193MB0752.EURP193.PROD.OUTLOOK.COM Signed-off-by: Juntong Deng Suggested-by: Dmitry Vyukov Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 50cc519e23f4..54e20b2bc3e1 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -362,6 +362,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, { unsigned int ok_size; unsigned int optimal_size; + unsigned int rem_free_meta_size; + unsigned int orig_alloc_meta_offset; if (!kasan_requires_meta()) return; @@ -395,6 +397,9 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, /* Continue, since free meta might still fit. */ } + ok_size = *size; + orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset; + /* * Add free meta into redzone when it's not possible to store * it in the object. This is the case when: @@ -402,23 +407,37 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, * be touched after it was freed, or * 2. Object has a constructor, which means it's expected to * retain its content until the next allocation, or - * 3. Object is too small. + * 3. Object is too small and SLUB DEBUG is enabled. Avoid + * free meta that exceeds the object size corrupts the + * SLUB DEBUG metadata. * Otherwise cache->kasan_info.free_meta_offset = 0 is implied. + * If the object is smaller than the free meta and SLUB DEBUG + * is not enabled, it is still possible to store part of the + * free meta in the object. */ - if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor || - cache->object_size < sizeof(struct kasan_free_meta)) { - ok_size = *size; - + if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); - - /* If free meta doesn't fit, don't add it. */ - if (*size > KMALLOC_MAX_SIZE) { - cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META; - *size = ok_size; + } else if (cache->object_size < sizeof(struct kasan_free_meta)) { + if (__slub_debug_enabled()) { + cache->kasan_info.free_meta_offset = *size; + *size += sizeof(struct kasan_free_meta); + } else { + rem_free_meta_size = sizeof(struct kasan_free_meta) - + cache->object_size; + *size += rem_free_meta_size; + if (cache->kasan_info.alloc_meta_offset != 0) + cache->kasan_info.alloc_meta_offset += rem_free_meta_size; } } + /* If free meta doesn't fit, don't add it. */ + if (*size > KMALLOC_MAX_SIZE) { + cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META; + cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset; + *size = ok_size; + } + /* Calculate size with optimal redzone. 
*/ optimal_size = cache->object_size + optimal_redzone(cache->object_size); /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */ -- cgit v1.2.3 From 5d4c6ac94694096cdfb528f05a3019a1a423b3a4 Mon Sep 17 00:00:00 2001 From: Juntong Deng Date: Mon, 27 Nov 2023 21:17:31 +0000 Subject: kasan: record and report more information Record and report more information to help us find the cause of the bug and to help us correlate the error with other system events. This patch adds recording and showing CPU number and timestamp at allocation and free (controlled by CONFIG_KASAN_EXTRA_INFO). The timestamps in the report use the same format and source as printk. Error occurrence timestamp is already implicit in the printk log, and CPU number is already shown by dump_stack_lvl, so there is no need to add it. In order to record CPU number and timestamp at allocation and free, corresponding members need to be added to the relevant data structures, which will lead to increased memory consumption. In Generic KASAN, members are added to struct kasan_track. Since in most cases, alloc meta is stored in the redzone and free meta is stored in the object or the redzone, memory consumption will not increase much. In SW_TAGS KASAN and HW_TAGS KASAN, members are added to struct kasan_stack_ring_entry. Memory consumption increases as the size of struct kasan_stack_ring_entry increases (this part of the memory is allocated by memblock), but since this is configurable, it is up to the user to choose. Link: https://lkml.kernel.org/r/VI1P193MB0752BD991325D10E4AB1913599BDA@VI1P193MB0752.EURP193.PROD.OUTLOOK.COM Signed-off-by: Juntong Deng Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- lib/Kconfig.kasan | 21 +++++++++++++++++++++ mm/kasan/common.c | 8 ++++++++ mm/kasan/kasan.h | 8 ++++++++ mm/kasan/report.c | 12 ++++++++++++ mm/kasan/report_tags.c | 15 +++++++++++++++ mm/kasan/tags.c | 15 +++++++++++++++ 6 files changed, 79 insertions(+) (limited to 'mm/kasan') diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 935eda08b1e1..8653f5c38be7 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -207,4 +207,25 @@ config KASAN_MODULE_TEST A part of the KASAN test suite that is not integrated with KUnit. Incompatible with Hardware Tag-Based KASAN. +config KASAN_EXTRA_INFO + bool "Record and report more information" + depends on KASAN + help + Record and report more information to help us find the cause of the + bug and to help us correlate the error with other system events. + + Currently, the CPU number and timestamp are additionally + recorded for each heap block at allocation and free time, and + 8 bytes will be added to each metadata structure that records + allocation or free information. + + In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add + 16 bytes of additional memory consumption, and each kmalloc-32 + object will add 8 bytes of additional memory consumption, not + affecting other larger objects. + + In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size + boot parameter, it will add 8 * stack_ring_size bytes of additional + memory consumption. 
+ endif # KASAN diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b5d8bd26fced..fe6c4b43ad9f 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -49,6 +50,13 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) void kasan_set_track(struct kasan_track *track, gfp_t flags) { +#ifdef CONFIG_KASAN_EXTRA_INFO + u32 cpu = raw_smp_processor_id(); + u64 ts_nsec = local_clock(); + + track->cpu = cpu; + track->timestamp = ts_nsec >> 3; +#endif /* CONFIG_KASAN_EXTRA_INFO */ track->pid = current->pid; track->stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index b29d46b83d1f..5e298e3ac909 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -187,6 +187,10 @@ static inline bool kasan_requires_meta(void) struct kasan_track { u32 pid; depot_stack_handle_t stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 cpu:20; + u64 timestamp:44; +#endif /* CONFIG_KASAN_EXTRA_INFO */ }; enum kasan_report_type { @@ -278,6 +282,10 @@ struct kasan_stack_ring_entry { u32 pid; depot_stack_handle_t stack; bool is_free; +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 cpu:20; + u64 timestamp:44; +#endif /* CONFIG_KASAN_EXTRA_INFO */ }; struct kasan_stack_ring { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index e77facb62900..a938237f6882 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -262,7 +262,19 @@ static void print_error_description(struct kasan_report_info *info) static void print_track(struct kasan_track *track, const char *prefix) { +#ifdef CONFIG_KASAN_EXTRA_INFO + u64 ts_nsec = track->timestamp; + unsigned long rem_usec; + + ts_nsec <<= 3; + rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000; + + pr_err("%s by task %u on cpu %d at %lu.%06lus:\n", + prefix, track->pid, track->cpu, + (unsigned long)ts_nsec, rem_usec); +#else pr_err("%s by task %u:\n", prefix, track->pid); +#endif /* CONFIG_KASAN_EXTRA_INFO */ if (track->stack) stack_depot_print(track->stack); else diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 55154743f915..979f284c2497 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -27,6 +27,15 @@ static const char *get_common_bug_type(struct kasan_report_info *info) return "invalid-access"; } +#ifdef CONFIG_KASAN_EXTRA_INFO +static void kasan_complete_extra_report_info(struct kasan_track *track, + struct kasan_stack_ring_entry *entry) +{ + track->cpu = entry->cpu; + track->timestamp = entry->timestamp; +} +#endif /* CONFIG_KASAN_EXTRA_INFO */ + void kasan_complete_mode_report_info(struct kasan_report_info *info) { unsigned long flags; @@ -73,6 +82,9 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) info->free_track.pid = entry->pid; info->free_track.stack = entry->stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + kasan_complete_extra_report_info(&info->free_track, entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ free_found = true; /* @@ -88,6 +100,9 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) info->alloc_track.pid = entry->pid; info->alloc_track.stack = entry->stack; +#ifdef CONFIG_KASAN_EXTRA_INFO + kasan_complete_extra_report_info(&info->alloc_track, entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ alloc_found = true; /* diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 739ae997463d..c13b198b8302 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ 
-93,6 +94,17 @@ void __init kasan_init_tags(void) } } +#ifdef CONFIG_KASAN_EXTRA_INFO +static void save_extra_info(struct kasan_stack_ring_entry *entry) +{ + u32 cpu = raw_smp_processor_id(); + u64 ts_nsec = local_clock(); + + entry->cpu = cpu; + entry->timestamp = ts_nsec >> 3; +} +#endif /* CONFIG_KASAN_EXTRA_INFO */ + static void save_stack_info(struct kmem_cache *cache, void *object, gfp_t gfp_flags, bool is_free) { @@ -128,6 +140,9 @@ next: entry->pid = current->pid; entry->stack = stack; entry->is_free = is_free; +#ifdef CONFIG_KASAN_EXTRA_INFO + save_extra_info(entry); +#endif /* CONFIG_KASAN_EXTRA_INFO */ entry->ptr = object; -- cgit v1.2.3 From 280ec6ccb6422aa4a04f9ac4216ddcf055acc95d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:45 +0100 Subject: kasan: rename kasan_slab_free_mempool to kasan_mempool_poison_object Patch series "kasan: save mempool stack traces". This series updates KASAN to save alloc and free stack traces for secondary-level allocators that cache and reuse allocations internally instead of giving them back to the underlying allocator (e.g. mempool). As a part of this change, introduce and document a set of KASAN hooks: bool kasan_mempool_poison_pages(struct page *page, unsigned int order); void kasan_mempool_unpoison_pages(struct page *page, unsigned int order); bool kasan_mempool_poison_object(void *ptr); void kasan_mempool_unpoison_object(void *ptr, size_t size); and use them in the mempool code. Besides mempool, skbuff and io_uring also cache allocations and already use KASAN hooks to poison those. Their code is updated to use the new mempool hooks. The new hooks save alloc and free stack traces (for normal kmalloc and slab objects; stack traces for large kmalloc objects and page_alloc are not supported by KASAN yet), improve the readability of the users' code, and also allow the users to prevent double-free and invalid-free bugs; see the patches for the details. This patch (of 21): Rename kasan_slab_free_mempool to kasan_mempool_poison_object. kasan_slab_free_mempool is a slightly confusing name: it is unclear whether this function poisons the object when it is freed into mempool or does something when the object is freed from mempool to the underlying allocator. The new name also aligns with other mempool-related KASAN hooks added in the following patches in this series. 
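By the end of the series, the hooks pair up as follows for a subsystem that caches allocations (illustrative fragment only; the surrounding cache code is made up):

	/* Stashing a slab/kmalloc allocation in an internal cache: */
	if (!kasan_mempool_poison_object(ptr))
		return;	/* double-free or invalid-free detected, do not reuse */
	/* ...keep ptr in the cache... */

	/* Handing a cached allocation back out: */
	kasan_mempool_unpoison_object(ptr, size);

	/* Cached page allocations follow the same pattern via
	 * kasan_mempool_poison_pages()/kasan_mempool_unpoison_pages(). */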
Link: https://lkml.kernel.org/r/cover.1703024586.git.andreyknvl@google.com Link: https://lkml.kernel.org/r/c5618685abb7cdbf9fb4897f565e7759f601da84.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 8 ++++---- io_uring/alloc_cache.h | 3 +-- mm/kasan/common.c | 4 ++-- mm/mempool.c | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72cb693b075b..6310435f528b 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -172,11 +172,11 @@ static __always_inline void kasan_kfree_large(void *ptr) __kasan_kfree_large(ptr, _RET_IP_); } -void __kasan_slab_free_mempool(void *ptr, unsigned long ip); -static __always_inline void kasan_slab_free_mempool(void *ptr) +void __kasan_mempool_poison_object(void *ptr, unsigned long ip); +static __always_inline void kasan_mempool_poison_object(void *ptr) { if (kasan_enabled()) - __kasan_slab_free_mempool(ptr, _RET_IP_); + __kasan_mempool_poison_object(ptr, _RET_IP_); } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, @@ -256,7 +256,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init return false; } static inline void kasan_kfree_large(void *ptr) {} -static inline void kasan_slab_free_mempool(void *ptr) {} +static inline void kasan_mempool_poison_object(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init) { diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h index 241245cb54a6..8de0414e8efe 100644 --- a/io_uring/alloc_cache.h +++ b/io_uring/alloc_cache.h @@ -16,8 +16,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache, if (cache->nr_cached < cache->max_cached) { cache->nr_cached++; wq_stack_add_head(&entry->node, &cache->list); - /* KASAN poisons object */ - kasan_slab_free_mempool(entry); + kasan_mempool_poison_object(entry); return true; } return false; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index fe6c4b43ad9f..e0394d0ee7f1 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -271,7 +271,7 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) /* * The object will be poisoned by kasan_poison_pages() or - * kasan_slab_free_mempool(). + * kasan_mempool_poison_object(). 
*/ return false; @@ -282,7 +282,7 @@ void __kasan_kfree_large(void *ptr, unsigned long ip) ____kasan_kfree_large(ptr, ip); } -void __kasan_slab_free_mempool(void *ptr, unsigned long ip) +void __kasan_mempool_poison_object(void *ptr, unsigned long ip) { struct folio *folio; diff --git a/mm/mempool.c b/mm/mempool.c index b3d2084fd989..7e1c729f292b 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -107,7 +107,7 @@ static inline void poison_element(mempool_t *pool, void *element) static __always_inline void kasan_poison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) - kasan_slab_free_mempool(element); + kasan_mempool_poison_object(element); else if (pool->alloc == mempool_alloc_pages) kasan_poison_pages(element, (unsigned long)pool->pool_data, false); -- cgit v1.2.3 From 9b94fe91099cbf05606151ef05bea9632666f5d5 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:46 +0100 Subject: kasan: move kasan_mempool_poison_object Move kasan_mempool_poison_object after all slab-related KASAN hooks. This is a preparatory change for the following patches in this series. No functional changes. Link: https://lkml.kernel.org/r/23ea215409f43c13cdf9ecc454501a264c107d67.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 16 ++++++++-------- mm/kasan/common.c | 46 +++++++++++++++++++++++----------------------- 2 files changed, 31 insertions(+), 31 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6310435f528b..0d1f925c136d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -172,13 +172,6 @@ static __always_inline void kasan_kfree_large(void *ptr) __kasan_kfree_large(ptr, _RET_IP_); } -void __kasan_mempool_poison_object(void *ptr, unsigned long ip); -static __always_inline void kasan_mempool_poison_object(void *ptr) -{ - if (kasan_enabled()) - __kasan_mempool_poison_object(ptr, _RET_IP_); -} - void * __must_check __kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init); static __always_inline void * __must_check kasan_slab_alloc( @@ -219,6 +212,13 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } +void __kasan_mempool_poison_object(void *ptr, unsigned long ip); +static __always_inline void kasan_mempool_poison_object(void *ptr) +{ + if (kasan_enabled()) + __kasan_mempool_poison_object(ptr, _RET_IP_); +} + /* * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * the hardware tag-based mode that doesn't rely on compiler instrumentation. 
@@ -256,7 +256,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init return false; } static inline void kasan_kfree_large(void *ptr) {} -static inline void kasan_mempool_poison_object(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags, bool init) { @@ -276,6 +275,7 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } +static inline void kasan_mempool_poison_object(void *ptr) {} static inline bool kasan_check_byte(const void *address) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index e0394d0ee7f1..fc7f711607e1 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -282,29 +282,6 @@ void __kasan_kfree_large(void *ptr, unsigned long ip) ____kasan_kfree_large(ptr, ip); } -void __kasan_mempool_poison_object(void *ptr, unsigned long ip) -{ - struct folio *folio; - - folio = virt_to_folio(ptr); - - /* - * Even though this function is only called for kmem_cache_alloc and - * kmalloc backed mempool allocations, those allocations can still be - * !PageSlab() when the size provided to kmalloc is larger than - * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. - */ - if (unlikely(!folio_test_slab(folio))) { - if (____kasan_kfree_large(ptr, ip)) - return; - kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false); - } else { - struct slab *slab = folio_slab(folio); - - ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); - } -} - void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags, bool init) { @@ -452,6 +429,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag return ____kasan_kmalloc(slab->slab_cache, object, size, flags); } +void __kasan_mempool_poison_object(void *ptr, unsigned long ip) +{ + struct folio *folio; + + folio = virt_to_folio(ptr); + + /* + * Even though this function is only called for kmem_cache_alloc and + * kmalloc backed mempool allocations, those allocations can still be + * !PageSlab() when the size provided to kmalloc is larger than + * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. + */ + if (unlikely(!folio_test_slab(folio))) { + if (____kasan_kfree_large(ptr, ip)) + return; + kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false); + } else { + struct slab *slab = folio_slab(folio); + + ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); + } +} + bool __kasan_check_byte(const void *address, unsigned long ip) { if (!kasan_byte_accessible(address)) { -- cgit v1.2.3 From 2e7c954c11af96aa1e0566a706f22152ef91d759 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:48 +0100 Subject: kasan: add return value for kasan_mempool_poison_object Add a return value for kasan_mempool_poison_object that lets the caller know whether the allocation is affected by a double-free or an invalid-free bug. The caller can use this return value to stop operating on the object. Also introduce a check_page_allocation helper function to improve the code readability. 
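Condensed from the hunks below, the new return value simply propagates the outcome of the free-time checks (large kmalloc allocations go through the new check_page_allocation() helper, slab objects through the existing slab free path):

	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;	/* bug reported: not safe to reuse */
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}
	...
	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);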
Link: https://lkml.kernel.org/r/618af65273875fb9f56954285443279b15f1fcd9.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 17 ++++++++++++----- mm/kasan/common.c | 21 ++++++++++----------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index bbf6e2fa4ffd..33387e254caa 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -212,7 +212,7 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } -void __kasan_mempool_poison_object(void *ptr, unsigned long ip); +bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); /** * kasan_mempool_poison_object - Check and poison a mempool slab allocation. * @ptr: Pointer to the slab allocation. @@ -225,16 +225,20 @@ void __kasan_mempool_poison_object(void *ptr, unsigned long ip); * without putting it into the quarantine (for the Generic mode). * * This function also performs checks to detect double-free and invalid-free - * bugs and reports them. + * bugs and reports them. The caller can use the return value of this function + * to find out if the allocation is buggy. * * This function operates on all slab allocations including large kmalloc * allocations (the ones returned by kmalloc_large() or by kmalloc() with the * size > KMALLOC_MAX_SIZE). + * + * Return: true if the allocation can be safely reused; false otherwise. */ -static __always_inline void kasan_mempool_poison_object(void *ptr) +static __always_inline bool kasan_mempool_poison_object(void *ptr) { if (kasan_enabled()) - __kasan_mempool_poison_object(ptr, _RET_IP_); + return __kasan_mempool_poison_object(ptr, _RET_IP_); + return true; } /* @@ -293,7 +297,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_mempool_poison_object(void *ptr) {} +static inline bool kasan_mempool_poison_object(void *ptr) +{ + return true; +} static inline bool kasan_check_byte(const void *address) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index fc7f711607e1..2b4869de4985 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -254,7 +254,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, return ____kasan_slab_free(cache, object, ip, true, init); } -static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) +static inline bool check_page_allocation(void *ptr, unsigned long ip) { if (!kasan_arch_is_ready()) return false; @@ -269,17 +269,14 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip) return true; } - /* - * The object will be poisoned by kasan_poison_pages() or - * kasan_mempool_poison_object(). - */ - return false; } void __kasan_kfree_large(void *ptr, unsigned long ip) { - ____kasan_kfree_large(ptr, ip); + check_page_allocation(ptr, ip); + + /* The object will be poisoned by kasan_poison_pages(). 
*/ } void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, @@ -429,7 +426,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag return ____kasan_kmalloc(slab->slab_cache, object, size, flags); } -void __kasan_mempool_poison_object(void *ptr, unsigned long ip) +bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) { struct folio *folio; @@ -442,13 +439,15 @@ void __kasan_mempool_poison_object(void *ptr, unsigned long ip) * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. */ if (unlikely(!folio_test_slab(folio))) { - if (____kasan_kfree_large(ptr, ip)) - return; + if (check_page_allocation(ptr, ip)) + return false; kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false); + return true; } else { struct slab *slab = folio_slab(folio); - ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); + return !____kasan_slab_free(slab->slab_cache, ptr, ip, + false, false); } } -- cgit v1.2.3 From 1956832753735b1c399b86b2c66cb7c317dc9f31 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:49 +0100 Subject: kasan: introduce kasan_mempool_unpoison_object Introduce and document a kasan_mempool_unpoison_object hook. This hook serves as a replacement for the generic kasan_unpoison_range that the mempool code relies on right now. mempool will be updated to use the new hook in one of the following patches. For now, define the new hook to be identical to kasan_unpoison_range. One of the following patches will update it to add stack trace collection. Link: https://lkml.kernel.org/r/dae25f0e18ed8fd50efe509c5b71a0592de5c18d.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 31 +++++++++++++++++++++++++++++++ mm/kasan/common.c | 5 +++++ 2 files changed, 36 insertions(+) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 33387e254caa..c5fe303bc1c2 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -228,6 +228,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); * bugs and reports them. The caller can use the return value of this function * to find out if the allocation is buggy. * + * Before the poisoned allocation can be reused, it must be unpoisoned via + * kasan_mempool_unpoison_object(). + * * This function operates on all slab allocations including large kmalloc * allocations (the ones returned by kmalloc_large() or by kmalloc() with the * size > KMALLOC_MAX_SIZE). @@ -241,6 +244,32 @@ static __always_inline bool kasan_mempool_poison_object(void *ptr) return true; } +void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip); +/** + * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation. + * @ptr: Pointer to the slab allocation. + * @size: Size to be unpoisoned. + * + * This function is intended for kernel subsystems that cache slab allocations + * to reuse them instead of freeing them back to the slab allocator (e.g. + * mempool). + * + * This function unpoisons a slab allocation that was previously poisoned via + * kasan_mempool_poison_object() without initializing its memory. For the + * tag-based modes, this function does not assign a new tag to the allocation + * and instead restores the original tags based on the pointer value. 
+ * + * This function operates on all slab allocations including large kmalloc + * allocations (the ones returned by kmalloc_large() or by kmalloc() with the + * size > KMALLOC_MAX_SIZE). + */ +static __always_inline void kasan_mempool_unpoison_object(void *ptr, + size_t size) +{ + if (kasan_enabled()) + __kasan_mempool_unpoison_object(ptr, size, _RET_IP_); +} + /* * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * the hardware tag-based mode that doesn't rely on compiler instrumentation. @@ -301,6 +330,8 @@ static inline bool kasan_mempool_poison_object(void *ptr) { return true; } +static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {} + static inline bool kasan_check_byte(const void *address) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 2b4869de4985..4b85d35bb8ab 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -451,6 +451,11 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) } } +void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) +{ + kasan_unpoison(ptr, size, false); +} + bool __kasan_check_byte(const void *address, unsigned long ip) { if (!kasan_byte_accessible(address)) { -- cgit v1.2.3 From f129c31039283df884913142b0f3797d64d3a9d6 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:50 +0100 Subject: kasan: introduce kasan_mempool_poison_pages Introduce and document a kasan_mempool_poison_pages hook to be used by the mempool code instead of kasan_poison_pages. Compated to kasan_poison_pages, the new hook: 1. For the tag-based modes, skips checking and poisoning allocations that were not tagged due to sampling. 2. Checks for double-free and invalid-free bugs. In the future, kasan_poison_pages can also be updated to handle #2, but this is out-of-scope of this series. Link: https://lkml.kernel.org/r/88dc7340cce28249abf789f6e0c792c317df9ba5.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 27 +++++++++++++++++++++++++++ mm/kasan/common.c | 23 +++++++++++++++++++++++ 2 files changed, 50 insertions(+) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index c5fe303bc1c2..de2a695ad34d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -212,6 +212,29 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } +bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, + unsigned long ip); +/** + * kasan_mempool_poison_pages - Check and poison a mempool page allocation. + * @page: Pointer to the page allocation. + * @order: Order of the allocation. + * + * This function is intended for kernel subsystems that cache page allocations + * to reuse them instead of freeing them back to page_alloc (e.g. mempool). + * + * This function is similar to kasan_mempool_poison_object() but operates on + * page allocations. + * + * Return: true if the allocation can be safely reused; false otherwise. 
+ */ +static __always_inline bool kasan_mempool_poison_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + return __kasan_mempool_poison_pages(page, order, _RET_IP_); + return true; +} + bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); /** * kasan_mempool_poison_object - Check and poison a mempool slab allocation. @@ -326,6 +349,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } +static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order) +{ + return true; +} static inline bool kasan_mempool_poison_object(void *ptr) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 4b85d35bb8ab..b416f4c265a4 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -426,6 +426,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag return ____kasan_kmalloc(slab->slab_cache, object, size, flags); } +bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, + unsigned long ip) +{ + unsigned long *ptr; + + if (unlikely(PageHighMem(page))) + return true; + + /* Bail out if allocation was excluded due to sampling. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && + page_kasan_tag(page) == KASAN_TAG_KERNEL) + return true; + + ptr = page_address(page); + + if (check_page_allocation(ptr, ip)) + return false; + + kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false); + + return true; +} + bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) { struct folio *folio; -- cgit v1.2.3 From 9f41c59ae3163690868a32bd77e9e33c3bab555e Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:51 +0100 Subject: kasan: introduce kasan_mempool_unpoison_pages Introduce and document a new kasan_mempool_unpoison_pages hook to be used by the mempool code instead of kasan_unpoison_pages. This hook is not functionally different from kasan_unpoison_pages, but using it improves the mempool code readability. Link: https://lkml.kernel.org/r/239bd9af6176f2cc59f5c25893eb36143184daff.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 25 +++++++++++++++++++++++++ mm/kasan/common.c | 6 ++++++ 2 files changed, 31 insertions(+) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index de2a695ad34d..f8ebde384bd7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -225,6 +225,9 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, * This function is similar to kasan_mempool_poison_object() but operates on * page allocations. * + * Before the poisoned allocation can be reused, it must be unpoisoned via + * kasan_mempool_unpoison_pages(). + * * Return: true if the allocation can be safely reused; false otherwise. */ static __always_inline bool kasan_mempool_poison_pages(struct page *page, @@ -235,6 +238,27 @@ static __always_inline bool kasan_mempool_poison_pages(struct page *page, return true; } +void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, + unsigned long ip); +/** + * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation. + * @page: Pointer to the page allocation. + * @order: Order of the allocation. 
+ * + * This function is intended for kernel subsystems that cache page allocations + * to reuse them instead of freeing them back to page_alloc (e.g. mempool). + * + * This function unpoisons a page allocation that was previously poisoned by + * kasan_mempool_poison_pages() without zeroing the allocation's memory. For + * the tag-based modes, this function assigns a new tag to the allocation. + */ +static __always_inline void kasan_mempool_unpoison_pages(struct page *page, + unsigned int order) +{ + if (kasan_enabled()) + __kasan_mempool_unpoison_pages(page, order, _RET_IP_); +} + bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); /** * kasan_mempool_poison_object - Check and poison a mempool slab allocation. @@ -353,6 +377,7 @@ static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int or { return true; } +static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {} static inline bool kasan_mempool_poison_object(void *ptr) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b416f4c265a4..7ebc001d0fcd 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -449,6 +449,12 @@ bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, return true; } +void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, + unsigned long ip) +{ + __kasan_unpoison_pages(page, order, false); +} + bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) { struct folio *folio; -- cgit v1.2.3 From cf0da2afe3dc2462b07fc951fa335a318eb38775 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:52 +0100 Subject: kasan: clean up __kasan_mempool_poison_object Reorganize the code and reword the comment in __kasan_mempool_poison_object to improve the code readability. Link: https://lkml.kernel.org/r/4f6fc8840512286c1a96e16e86901082c671677d.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 7ebc001d0fcd..3f4a1ed69e03 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -457,27 +457,22 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) { - struct folio *folio; - - folio = virt_to_folio(ptr); + struct folio *folio = virt_to_folio(ptr); + struct slab *slab; /* - * Even though this function is only called for kmem_cache_alloc and - * kmalloc backed mempool allocations, those allocations can still be - * !PageSlab() when the size provided to kmalloc is larger than - * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. + * This function can be called for large kmalloc allocation that get + * their memory from page_alloc. Thus, the folio might not be a slab. 
*/ if (unlikely(!folio_test_slab(folio))) { if (check_page_allocation(ptr, ip)) return false; kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false); return true; - } else { - struct slab *slab = folio_slab(folio); - - return !____kasan_slab_free(slab->slab_cache, ptr, ip, - false, false); } + + slab = folio_slab(folio); + return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); } void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) -- cgit v1.2.3 From b556a462eb8df6b6836c318d23f43409c40a7c7e Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:53 +0100 Subject: kasan: save free stack traces for slab mempools Make kasan_mempool_poison_object save free stack traces for slab and kmalloc mempools when the object is freed into the mempool. Also simplify and rename ____kasan_slab_free to poison_slab_object and do a few other reability changes. Link: https://lkml.kernel.org/r/413a7c7c3344fb56809853339ffaabc9e4905e94.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 5 +++-- mm/kasan/common.c | 20 +++++++++----------- 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f8ebde384bd7..e636a00e26ba 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -268,8 +268,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip); * to reuse them instead of freeing them back to the slab allocator (e.g. * mempool). * - * This function poisons a slab allocation without initializing its memory and - * without putting it into the quarantine (for the Generic mode). + * This function poisons a slab allocation and saves a free stack trace for it + * without initializing the allocation's memory and without putting it into the + * quarantine (for the Generic mode). * * This function also performs checks to detect double-free and invalid-free * bugs and reports them. The caller can use the return value of this function diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 3f4a1ed69e03..59146886e57d 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -207,8 +207,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, return (void *)object; } -static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, - unsigned long ip, bool quarantine, bool init) +static inline bool poison_slab_object(struct kmem_cache *cache, void *object, + unsigned long ip, bool init) { void *tagged_object; @@ -221,13 +221,12 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, if (is_kfence_address(object)) return false; - if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != - object)) { + if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) { kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE); return true; } - /* RCU slabs could be legally used after free within the RCU period */ + /* RCU slabs could be legally used after free within the RCU period. 
*/ if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) return false; @@ -239,19 +238,18 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object, kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), KASAN_SLAB_FREE, init); - if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)) - return false; - if (kasan_stack_collection_enabled()) kasan_save_free_info(cache, tagged_object); - return kasan_quarantine_put(cache, object); + return false; } bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) { - return ____kasan_slab_free(cache, object, ip, true, init); + bool buggy_object = poison_slab_object(cache, object, ip, init); + + return buggy_object ? true : kasan_quarantine_put(cache, object); } static inline bool check_page_allocation(void *ptr, unsigned long ip) @@ -472,7 +470,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) } slab = folio_slab(folio); - return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); + return !poison_slab_object(slab->slab_cache, ptr, ip, false); } void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) -- cgit v1.2.3 From ce37eec0ab62fb1f04509b83161845859815ee13 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:54 +0100 Subject: kasan: clean up and rename ____kasan_kmalloc Introduce a new poison_kmalloc_redzone helper function that poisons the redzone for kmalloc object. Drop the confusingly named ____kasan_kmalloc function and instead use poison_kmalloc_redzone along with the other required parts of ____kasan_kmalloc in the callers' code. This is a preparatory change for the following patches in this series. Link: https://lkml.kernel.org/r/5881232ad357ec0d59a5b1aefd9e0673a386399a.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 59146886e57d..1217b260abc3 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -312,26 +312,12 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, return tagged_object; } -static inline void *____kasan_kmalloc(struct kmem_cache *cache, +static inline void poison_kmalloc_redzone(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) { unsigned long redzone_start; unsigned long redzone_end; - if (gfpflags_allow_blocking(flags)) - kasan_quarantine_reduce(); - - if (unlikely(object == NULL)) - return NULL; - - if (is_kfence_address(kasan_reset_tag(object))) - return (void *)object; - - /* - * The object has already been unpoisoned by kasan_slab_alloc() for - * kmalloc() or by kasan_krealloc() for krealloc(). - */ - /* * The redzone has byte-level precision for the generic mode. * Partially poison the last object granule to cover the unaligned @@ -355,14 +341,25 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache, if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache)) kasan_save_alloc_info(cache, (void *)object, flags); - /* Keep the tag that was set by kasan_slab_alloc(). 
*/ - return (void *)object; } void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) { - return ____kasan_kmalloc(cache, object, size, flags); + if (gfpflags_allow_blocking(flags)) + kasan_quarantine_reduce(); + + if (unlikely(object == NULL)) + return NULL; + + if (is_kfence_address(kasan_reset_tag(object))) + return (void *)object; + + /* The object has already been unpoisoned by kasan_slab_alloc(). */ + poison_kmalloc_redzone(cache, object, size, flags); + + /* Keep the tag that was set by kasan_slab_alloc(). */ + return (void *)object; } EXPORT_SYMBOL(__kasan_kmalloc); @@ -408,6 +405,9 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag if (unlikely(object == ZERO_SIZE_PTR)) return (void *)object; + if (is_kfence_address(kasan_reset_tag(object))) + return (void *)object; + /* * Unpoison the object's data. * Part of it might already have been unpoisoned, but it's unknown @@ -420,8 +420,10 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag /* Piggy-back on kmalloc() instrumentation to poison the redzone. */ if (unlikely(!slab)) return __kasan_kmalloc_large(object, size, flags); - else - return ____kasan_kmalloc(slab->slab_cache, object, size, flags); + else { + poison_kmalloc_redzone(slab->slab_cache, object, size, flags); + return (void *)object; + } } bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, -- cgit v1.2.3 From 0cc9fdbf4a5273310779bd4779fcdfb4705438a6 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:55 +0100 Subject: kasan: introduce poison_kmalloc_large_redzone Split out a poison_kmalloc_large_redzone helper from __kasan_kmalloc_large and use it in the caller's code. This is a preparatory change for the following patches in this series. Link: https://lkml.kernel.org/r/93317097b668519d76097fb065201b2027436e22.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 1217b260abc3..962805bf5f62 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -363,23 +363,12 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object } EXPORT_SYMBOL(__kasan_kmalloc); -void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, +static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size, gfp_t flags) { unsigned long redzone_start; unsigned long redzone_end; - if (gfpflags_allow_blocking(flags)) - kasan_quarantine_reduce(); - - if (unlikely(ptr == NULL)) - return NULL; - - /* - * The object has already been unpoisoned by kasan_unpoison_pages() for - * alloc_pages() or by kasan_krealloc() for krealloc(). - */ - /* * The redzone has byte-level precision for the generic mode. * Partially poison the last object granule to cover the unaligned @@ -389,12 +378,25 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, kasan_poison_last_granule(ptr, size); /* Poison the aligned part of the redzone. 
*/ - redzone_start = round_up((unsigned long)(ptr + size), - KASAN_GRANULE_SIZE); + redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE); redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr)); kasan_poison((void *)redzone_start, redzone_end - redzone_start, KASAN_PAGE_REDZONE, false); +} +void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, + gfp_t flags) +{ + if (gfpflags_allow_blocking(flags)) + kasan_quarantine_reduce(); + + if (unlikely(ptr == NULL)) + return NULL; + + /* The object has already been unpoisoned by kasan_unpoison_pages(). */ + poison_kmalloc_large_redzone(ptr, size, flags); + + /* Keep the tag that was set by alloc_pages(). */ return (void *)ptr; } @@ -402,6 +404,9 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag { struct slab *slab; + if (gfpflags_allow_blocking(flags)) + kasan_quarantine_reduce(); + if (unlikely(object == ZERO_SIZE_PTR)) return (void *)object; @@ -419,11 +424,11 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag /* Piggy-back on kmalloc() instrumentation to poison the redzone. */ if (unlikely(!slab)) - return __kasan_kmalloc_large(object, size, flags); - else { + poison_kmalloc_large_redzone(object, size, flags); + else poison_kmalloc_redzone(slab->slab_cache, object, size, flags); - return (void *)object; - } + + return (void *)object; } bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, -- cgit v1.2.3 From 29d7355a9d05de9a6e38cc4d1146fb96c43853fb Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:28:56 +0100 Subject: kasan: save alloc stack traces for mempool Update kasan_mempool_unpoison_object to properly poison the redzone and save alloc strack traces for kmalloc and slab pools. As a part of this change, split out and use a unpoison_slab_object helper function from __kasan_slab_alloc. [nathan@kernel.org: mark unpoison_slab_object() as static] Link: https://lkml.kernel.org/r/20231221180042.104694-1-andrey.konovalov@linux.dev Link: https://lkml.kernel.org/r/05ad235da8347cfe14d496d01b2aaf074b4f607c.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Signed-off-by: Nathan Chancellor Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- include/linux/kasan.h | 7 ++++--- mm/kasan/common.c | 50 ++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 44 insertions(+), 13 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e636a00e26ba..7392c5d89b92 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -303,9 +303,10 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip); * mempool). * * This function unpoisons a slab allocation that was previously poisoned via - * kasan_mempool_poison_object() without initializing its memory. For the - * tag-based modes, this function does not assign a new tag to the allocation - * and instead restores the original tags based on the pointer value. + * kasan_mempool_poison_object() and saves an alloc stack trace for it without + * initializing the allocation's memory. For the tag-based modes, this function + * does not assign a new tag to the allocation and instead restores the + * original tags based on the pointer value. 
* * This function operates on all slab allocations including large kmalloc * allocations (the ones returned by kmalloc_large() or by kmalloc() with the diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 962805bf5f62..bf16c2dfa8e7 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -277,6 +277,20 @@ void __kasan_kfree_large(void *ptr, unsigned long ip) /* The object will be poisoned by kasan_poison_pages(). */ } +static inline void unpoison_slab_object(struct kmem_cache *cache, void *object, + gfp_t flags, bool init) +{ + /* + * Unpoison the whole object. For kmalloc() allocations, + * poison_kmalloc_redzone() will do precise poisoning. + */ + kasan_unpoison(object, cache->object_size, init); + + /* Save alloc info (if possible) for non-kmalloc() allocations. */ + if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache)) + kasan_save_alloc_info(cache, object, flags); +} + void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags, bool init) { @@ -299,15 +313,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache, tag = assign_tag(cache, object, false); tagged_object = set_tag(object, tag); - /* - * Unpoison the whole object. - * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning. - */ - kasan_unpoison(tagged_object, cache->object_size, init); - - /* Save alloc info (if possible) for non-kmalloc() allocations. */ - if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache)) - kasan_save_alloc_info(cache, tagged_object, flags); + /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ + unpoison_slab_object(cache, tagged_object, flags, init); return tagged_object; } @@ -482,7 +489,30 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) { - kasan_unpoison(ptr, size, false); + struct slab *slab; + gfp_t flags = 0; /* Might be executing under a lock. */ + + if (is_kfence_address(kasan_reset_tag(ptr))) + return; + + slab = virt_to_slab(ptr); + + /* + * This function can be called for large kmalloc allocation that get + * their memory from page_alloc. + */ + if (unlikely(!slab)) { + kasan_unpoison(ptr, size, false); + poison_kmalloc_large_redzone(ptr, size, flags); + return; + } + + /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ + unpoison_slab_object(slab->slab_cache, ptr, size, flags); + + /* Poison the redzone and save alloc info for kmalloc() allocations. */ + if (is_kmalloc_cache(slab->slab_cache)) + poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags); } bool __kasan_check_byte(const void *address, unsigned long ip) -- cgit v1.2.3 From 0f199eb4351ffb453c3df2da733213bef89a03b4 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:29:00 +0100 Subject: kasan: add mempool tests Add KASAN tests for mempool. 
Link: https://lkml.kernel.org/r/5fd64732266be8287711b6408d86ffc78784be06.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 319 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 34515a106ca5..23184dbd05a3 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -810,6 +811,312 @@ static void kmem_cache_bulk(struct kunit *test) kmem_cache_destroy(cache); } +static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size) +{ + int pool_size = 4; + int ret; + void *elem; + + memset(pool, 0, sizeof(*pool)); + ret = mempool_init_kmalloc_pool(pool, pool_size, size); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* + * Allocate one element to prevent mempool from freeing elements to the + * underlying allocator and instead make it add them to the element + * list when the tests trigger double-free and invalid-free bugs. + * This allows testing KASAN annotations in add_element(). + */ + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + return elem; +} + +static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size) +{ + struct kmem_cache *cache; + int pool_size = 4; + int ret; + + cache = kmem_cache_create("test_cache", size, 0, 0, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); + + memset(pool, 0, sizeof(*pool)); + ret = mempool_init_slab_pool(pool, pool_size, cache); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* + * Do not allocate one preallocated element, as we skip the double-free + * and invalid-free tests for slab mempool for simplicity. 
+ */ + + return cache; +} + +static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order) +{ + int pool_size = 4; + int ret; + void *elem; + + memset(pool, 0, sizeof(*pool)); + ret = mempool_init_page_pool(pool, pool_size, order); + KUNIT_ASSERT_EQ(test, ret, 0); + + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + return elem; +} + +static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size) +{ + char *elem; + + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + OPTIMIZER_HIDE_VAR(elem); + + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) + KUNIT_EXPECT_KASAN_FAIL(test, + ((volatile char *)&elem[size])[0]); + else + KUNIT_EXPECT_KASAN_FAIL(test, + ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]); + + mempool_free(elem, pool); +} + +static void mempool_kmalloc_oob_right(struct kunit *test) +{ + mempool_t pool; + size_t size = 128 - KASAN_GRANULE_SIZE - 5; + void *extra_elem; + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_oob_right_helper(test, &pool, size); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_kmalloc_large_oob_right(struct kunit *test) +{ + mempool_t pool; + size_t size = KMALLOC_MAX_CACHE_SIZE + 1; + void *extra_elem; + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_oob_right_helper(test, &pool, size); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_slab_oob_right(struct kunit *test) +{ + mempool_t pool; + size_t size = 123; + struct kmem_cache *cache; + + cache = mempool_prepare_slab(test, &pool, size); + + mempool_oob_right_helper(test, &pool, size); + + mempool_exit(&pool); + kmem_cache_destroy(cache); +} + +/* + * Skip the out-of-bounds test for page mempool. With Generic KASAN, page + * allocations have no redzones, and thus the out-of-bounds detection is not + * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With + * the tag-based KASAN modes, the neighboring allocation might have the same + * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505. + */ + +static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page) +{ + char *elem, *ptr; + + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + mempool_free(elem, pool); + + ptr = page ? page_address((struct page *)elem) : elem; + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); +} + +static void mempool_kmalloc_uaf(struct kunit *test) +{ + mempool_t pool; + size_t size = 128; + void *extra_elem; + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_uaf_helper(test, &pool, false); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_kmalloc_large_uaf(struct kunit *test) +{ + mempool_t pool; + size_t size = KMALLOC_MAX_CACHE_SIZE + 1; + void *extra_elem; + + /* page_alloc fallback is only implemented for SLUB. 
*/ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_uaf_helper(test, &pool, false); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_slab_uaf(struct kunit *test) +{ + mempool_t pool; + size_t size = 123; + struct kmem_cache *cache; + + cache = mempool_prepare_slab(test, &pool, size); + + mempool_uaf_helper(test, &pool, false); + + mempool_exit(&pool); + kmem_cache_destroy(cache); +} + +static void mempool_page_alloc_uaf(struct kunit *test) +{ + mempool_t pool; + int order = 2; + void *extra_elem; + + extra_elem = mempool_prepare_page(test, &pool, order); + + mempool_uaf_helper(test, &pool, true); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_double_free_helper(struct kunit *test, mempool_t *pool) +{ + char *elem; + + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + mempool_free(elem, pool); + + KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool)); +} + +static void mempool_kmalloc_double_free(struct kunit *test) +{ + mempool_t pool; + size_t size = 128; + char *extra_elem; + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_double_free_helper(test, &pool); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_kmalloc_large_double_free(struct kunit *test) +{ + mempool_t pool; + size_t size = KMALLOC_MAX_CACHE_SIZE + 1; + char *extra_elem; + + /* page_alloc fallback is only implemented for SLUB. */ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_double_free_helper(test, &pool); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_page_alloc_double_free(struct kunit *test) +{ + mempool_t pool; + int order = 2; + char *extra_elem; + + extra_elem = mempool_prepare_page(test, &pool, order); + + mempool_double_free_helper(test, &pool); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool) +{ + char *elem; + + elem = mempool_alloc_preallocated(pool); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem); + + KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool)); + + mempool_free(elem, pool); +} + +static void mempool_kmalloc_invalid_free(struct kunit *test) +{ + mempool_t pool; + size_t size = 128; + char *extra_elem; + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_kmalloc_invalid_free_helper(test, &pool); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +static void mempool_kmalloc_large_invalid_free(struct kunit *test) +{ + mempool_t pool; + size_t size = KMALLOC_MAX_CACHE_SIZE + 1; + char *extra_elem; + + /* page_alloc fallback is only implemented for SLUB. */ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); + + extra_elem = mempool_prepare_kmalloc(test, &pool, size); + + mempool_kmalloc_invalid_free_helper(test, &pool); + + mempool_free(extra_elem, &pool); + mempool_exit(&pool); +} + +/* + * Skip the invalid-free test for page mempool. The invalid-free detection only + * works for compound pages and mempool preallocates all page elements without + * the __GFP_COMP flag. 
+ */ + static char global_array[10]; static void kasan_global_oob_right(struct kunit *test) @@ -1550,6 +1857,18 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmem_cache_oob), KUNIT_CASE(kmem_cache_accounted), KUNIT_CASE(kmem_cache_bulk), + KUNIT_CASE(mempool_kmalloc_oob_right), + KUNIT_CASE(mempool_kmalloc_large_oob_right), + KUNIT_CASE(mempool_slab_oob_right), + KUNIT_CASE(mempool_kmalloc_uaf), + KUNIT_CASE(mempool_kmalloc_large_uaf), + KUNIT_CASE(mempool_slab_uaf), + KUNIT_CASE(mempool_page_alloc_uaf), + KUNIT_CASE(mempool_kmalloc_double_free), + KUNIT_CASE(mempool_kmalloc_large_double_free), + KUNIT_CASE(mempool_page_alloc_double_free), + KUNIT_CASE(mempool_kmalloc_invalid_free), + KUNIT_CASE(mempool_kmalloc_large_invalid_free), KUNIT_CASE(kasan_global_oob_right), KUNIT_CASE(kasan_global_oob_left), KUNIT_CASE(kasan_stack_oob), -- cgit v1.2.3 From 0f18ea6ea44cde3d7660e52fa8729d420f97409a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:29:01 +0100 Subject: kasan: rename pagealloc tests Rename "pagealloc" KASAN tests: 1. Use "kmalloc_large" for tests that use large kmalloc allocations. 2. Use "page_alloc" for tests that use page_alloc. Also clean up the comments. Link: https://lkml.kernel.org/r/f3eef6ddb87176c40958a3e5a0bd2386b52af4c6.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 23184dbd05a3..3cd977d60569 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -214,12 +214,13 @@ static void kmalloc_node_oob_right(struct kunit *test) } /* - * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't - * fit into a slab cache and therefore is allocated via the page allocator - * fallback. Since this kind of fallback is only implemented for SLUB, these - * tests are limited to that allocator. + * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk + * that does not fit into the largest slab cache and therefore is allocated via + * the page_alloc fallback for SLUB. SLAB has no such fallback, and thus these + * tests are not supported for it. 
*/ -static void kmalloc_pagealloc_oob_right(struct kunit *test) + +static void kmalloc_large_oob_right(struct kunit *test) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; @@ -235,7 +236,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test) kfree(ptr); } -static void kmalloc_pagealloc_uaf(struct kunit *test) +static void kmalloc_large_uaf(struct kunit *test) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; @@ -249,7 +250,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); } -static void kmalloc_pagealloc_invalid_free(struct kunit *test) +static void kmalloc_large_invalid_free(struct kunit *test) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; @@ -262,7 +263,7 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1)); } -static void pagealloc_oob_right(struct kunit *test) +static void page_alloc_oob_right(struct kunit *test) { char *ptr; struct page *pages; @@ -284,7 +285,7 @@ static void pagealloc_oob_right(struct kunit *test) free_pages((unsigned long)ptr, order); } -static void pagealloc_uaf(struct kunit *test) +static void page_alloc_uaf(struct kunit *test) { char *ptr; struct page *pages; @@ -298,15 +299,15 @@ static void pagealloc_uaf(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); } -static void kmalloc_large_oob_right(struct kunit *test) +/* + * Check that KASAN detects an out-of-bounds access for a big object allocated + * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB. + */ +static void kmalloc_big_oob_right(struct kunit *test) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE - 256; - /* - * Allocate a chunk that is large enough, but still fits into a slab - * and does not trigger the page allocator fallback in SLUB. - */ ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -404,18 +405,18 @@ static void krealloc_less_oob(struct kunit *test) krealloc_less_oob_helper(test, 235, 201); } -static void krealloc_pagealloc_more_oob(struct kunit *test) +static void krealloc_large_more_oob(struct kunit *test) { - /* page_alloc fallback in only implemented for SLUB. */ + /* page_alloc fallback is only implemented for SLUB. */ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201, KMALLOC_MAX_CACHE_SIZE + 235); } -static void krealloc_pagealloc_less_oob(struct kunit *test) +static void krealloc_large_less_oob(struct kunit *test) { - /* page_alloc fallback in only implemented for SLUB. */ + /* page_alloc fallback is only implemented for SLUB. 
*/ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235, @@ -1828,16 +1829,16 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_oob_right), KUNIT_CASE(kmalloc_oob_left), KUNIT_CASE(kmalloc_node_oob_right), - KUNIT_CASE(kmalloc_pagealloc_oob_right), - KUNIT_CASE(kmalloc_pagealloc_uaf), - KUNIT_CASE(kmalloc_pagealloc_invalid_free), - KUNIT_CASE(pagealloc_oob_right), - KUNIT_CASE(pagealloc_uaf), KUNIT_CASE(kmalloc_large_oob_right), + KUNIT_CASE(kmalloc_large_uaf), + KUNIT_CASE(kmalloc_large_invalid_free), + KUNIT_CASE(page_alloc_oob_right), + KUNIT_CASE(page_alloc_uaf), + KUNIT_CASE(kmalloc_big_oob_right), KUNIT_CASE(krealloc_more_oob), KUNIT_CASE(krealloc_less_oob), - KUNIT_CASE(krealloc_pagealloc_more_oob), - KUNIT_CASE(krealloc_pagealloc_less_oob), + KUNIT_CASE(krealloc_large_more_oob), + KUNIT_CASE(krealloc_large_less_oob), KUNIT_CASE(krealloc_uaf), KUNIT_CASE(kmalloc_oob_16), KUNIT_CASE(kmalloc_uaf_16), -- cgit v1.2.3 From 86b15969831bde23c96de00db46687762a6e9e7d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:29:02 +0100 Subject: kasan: reorder tests Put closely related tests next to each other. No functional changes. Link: https://lkml.kernel.org/r/acf0ee309394dbb5764c400434753ff030dd3d6c.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 418 +++++++++++++++++++++++++------------------------- 1 file changed, 209 insertions(+), 209 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 3cd977d60569..aa994b62378b 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -213,6 +213,23 @@ static void kmalloc_node_oob_right(struct kunit *test) kfree(ptr); } +/* + * Check that KASAN detects an out-of-bounds access for a big object allocated + * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB. + */ +static void kmalloc_big_oob_right(struct kunit *test) +{ + char *ptr; + size_t size = KMALLOC_MAX_CACHE_SIZE - 256; + + ptr = kmalloc(size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + + OPTIMIZER_HIDE_VAR(ptr); + KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); + kfree(ptr); +} + /* * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk * that does not fit into the largest slab cache and therefore is allocated via @@ -299,23 +316,6 @@ static void page_alloc_uaf(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); } -/* - * Check that KASAN detects an out-of-bounds access for a big object allocated - * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB. 
- */ -static void kmalloc_big_oob_right(struct kunit *test) -{ - char *ptr; - size_t size = KMALLOC_MAX_CACHE_SIZE - 256; - - ptr = kmalloc(size, GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - - OPTIMIZER_HIDE_VAR(ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); - kfree(ptr); -} - static void krealloc_more_oob_helper(struct kunit *test, size_t size1, size_t size2) { @@ -710,6 +710,126 @@ static void kmalloc_uaf3(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]); } +static void kmalloc_double_kzfree(struct kunit *test) +{ + char *ptr; + size_t size = 16; + + ptr = kmalloc(size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + + kfree_sensitive(ptr); + KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr)); +} + +/* Check that ksize() does NOT unpoison whole object. */ +static void ksize_unpoisons_memory(struct kunit *test) +{ + char *ptr; + size_t size = 128 - KASAN_GRANULE_SIZE - 5; + size_t real_size; + + ptr = kmalloc(size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + + real_size = ksize(ptr); + KUNIT_EXPECT_GT(test, real_size, size); + + OPTIMIZER_HIDE_VAR(ptr); + + /* These accesses shouldn't trigger a KASAN report. */ + ptr[0] = 'x'; + ptr[size - 1] = 'x'; + + /* These must trigger a KASAN report. */ + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); + + kfree(ptr); +} + +/* + * Check that a use-after-free is detected by ksize() and via normal accesses + * after it. + */ +static void ksize_uaf(struct kunit *test) +{ + char *ptr; + int size = 128 - KASAN_GRANULE_SIZE; + + ptr = kmalloc(size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + kfree(ptr); + + OPTIMIZER_HIDE_VAR(ptr); + KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); +} + +/* + * The two tests below check that Generic KASAN prints auxiliary stack traces + * for RCU callbacks and workqueues. The reports need to be inspected manually. + * + * These tests are still enabled for other KASAN modes to make sure that all + * modes report bad accesses in tested scenarios. 
+ */ + +static struct kasan_rcu_info { + int i; + struct rcu_head rcu; +} *global_rcu_ptr; + +static void rcu_uaf_reclaim(struct rcu_head *rp) +{ + struct kasan_rcu_info *fp = + container_of(rp, struct kasan_rcu_info, rcu); + + kfree(fp); + ((volatile struct kasan_rcu_info *)fp)->i; +} + +static void rcu_uaf(struct kunit *test) +{ + struct kasan_rcu_info *ptr; + + ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + + global_rcu_ptr = rcu_dereference_protected( + (struct kasan_rcu_info __rcu *)ptr, NULL); + + KUNIT_EXPECT_KASAN_FAIL(test, + call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); + rcu_barrier()); +} + +static void workqueue_uaf_work(struct work_struct *work) +{ + kfree(work); +} + +static void workqueue_uaf(struct kunit *test) +{ + struct workqueue_struct *workqueue; + struct work_struct *work; + + workqueue = create_workqueue("kasan_workqueue_test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue); + + work = kmalloc(sizeof(struct work_struct), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work); + + INIT_WORK(work, workqueue_uaf_work); + queue_work(workqueue, work); + destroy_workqueue(workqueue); + + KUNIT_EXPECT_KASAN_FAIL(test, + ((volatile struct work_struct *)work)->data); +} + static void kfree_via_page(struct kunit *test) { char *ptr; @@ -760,6 +880,69 @@ static void kmem_cache_oob(struct kunit *test) kmem_cache_destroy(cache); } +static void kmem_cache_double_free(struct kunit *test) +{ + char *p; + size_t size = 200; + struct kmem_cache *cache; + + cache = kmem_cache_create("test_cache", size, 0, 0, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); + + p = kmem_cache_alloc(cache, GFP_KERNEL); + if (!p) { + kunit_err(test, "Allocation failed: %s\n", __func__); + kmem_cache_destroy(cache); + return; + } + + kmem_cache_free(cache, p); + KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p)); + kmem_cache_destroy(cache); +} + +static void kmem_cache_invalid_free(struct kunit *test) +{ + char *p; + size_t size = 200; + struct kmem_cache *cache; + + cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU, + NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); + + p = kmem_cache_alloc(cache, GFP_KERNEL); + if (!p) { + kunit_err(test, "Allocation failed: %s\n", __func__); + kmem_cache_destroy(cache); + return; + } + + /* Trigger invalid free, the object doesn't get freed. */ + KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1)); + + /* + * Properly free the object to prevent the "Objects remaining in + * test_cache on __kmem_cache_shutdown" BUG failure. + */ + kmem_cache_free(cache, p); + + kmem_cache_destroy(cache); +} + +static void empty_cache_ctor(void *object) { } + +static void kmem_cache_double_destroy(struct kunit *test) +{ + struct kmem_cache *cache; + + /* Provide a constructor to prevent cache merging. */ + cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); + kmem_cache_destroy(cache); + KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache)); +} + static void kmem_cache_accounted(struct kunit *test) { int i; @@ -1157,53 +1340,6 @@ static void kasan_global_oob_left(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); } -/* Check that ksize() does NOT unpoison whole object. 
*/ -static void ksize_unpoisons_memory(struct kunit *test) -{ - char *ptr; - size_t size = 128 - KASAN_GRANULE_SIZE - 5; - size_t real_size; - - ptr = kmalloc(size, GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - - real_size = ksize(ptr); - KUNIT_EXPECT_GT(test, real_size, size); - - OPTIMIZER_HIDE_VAR(ptr); - - /* These accesses shouldn't trigger a KASAN report. */ - ptr[0] = 'x'; - ptr[size - 1] = 'x'; - - /* These must trigger a KASAN report. */ - if (IS_ENABLED(CONFIG_KASAN_GENERIC)) - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]); - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); - - kfree(ptr); -} - -/* - * Check that a use-after-free is detected by ksize() and via normal accesses - * after it. - */ -static void ksize_uaf(struct kunit *test) -{ - char *ptr; - int size = 128 - KASAN_GRANULE_SIZE; - - ptr = kmalloc(size, GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - kfree(ptr); - - OPTIMIZER_HIDE_VAR(ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); - KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); -} - static void kasan_stack_oob(struct kunit *test) { char stack_array[10]; @@ -1246,69 +1382,6 @@ static void kasan_alloca_oob_right(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); } -static void kmem_cache_double_free(struct kunit *test) -{ - char *p; - size_t size = 200; - struct kmem_cache *cache; - - cache = kmem_cache_create("test_cache", size, 0, 0, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); - - p = kmem_cache_alloc(cache, GFP_KERNEL); - if (!p) { - kunit_err(test, "Allocation failed: %s\n", __func__); - kmem_cache_destroy(cache); - return; - } - - kmem_cache_free(cache, p); - KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p)); - kmem_cache_destroy(cache); -} - -static void kmem_cache_invalid_free(struct kunit *test) -{ - char *p; - size_t size = 200; - struct kmem_cache *cache; - - cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU, - NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); - - p = kmem_cache_alloc(cache, GFP_KERNEL); - if (!p) { - kunit_err(test, "Allocation failed: %s\n", __func__); - kmem_cache_destroy(cache); - return; - } - - /* Trigger invalid free, the object doesn't get freed. */ - KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1)); - - /* - * Properly free the object to prevent the "Objects remaining in - * test_cache on __kmem_cache_shutdown" BUG failure. - */ - kmem_cache_free(cache, p); - - kmem_cache_destroy(cache); -} - -static void empty_cache_ctor(void *object) { } - -static void kmem_cache_double_destroy(struct kunit *test) -{ - struct kmem_cache *cache; - - /* Provide a constructor to prevent cache merging. 
*/ - cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); - kmem_cache_destroy(cache); - KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache)); -} - static void kasan_memchr(struct kunit *test) { char *ptr; @@ -1470,79 +1543,6 @@ static void kasan_bitops_tags(struct kunit *test) kfree(bits); } -static void kmalloc_double_kzfree(struct kunit *test) -{ - char *ptr; - size_t size = 16; - - ptr = kmalloc(size, GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - - kfree_sensitive(ptr); - KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr)); -} - -/* - * The two tests below check that Generic KASAN prints auxiliary stack traces - * for RCU callbacks and workqueues. The reports need to be inspected manually. - * - * These tests are still enabled for other KASAN modes to make sure that all - * modes report bad accesses in tested scenarios. - */ - -static struct kasan_rcu_info { - int i; - struct rcu_head rcu; -} *global_rcu_ptr; - -static void rcu_uaf_reclaim(struct rcu_head *rp) -{ - struct kasan_rcu_info *fp = - container_of(rp, struct kasan_rcu_info, rcu); - - kfree(fp); - ((volatile struct kasan_rcu_info *)fp)->i; -} - -static void rcu_uaf(struct kunit *test) -{ - struct kasan_rcu_info *ptr; - - ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - - global_rcu_ptr = rcu_dereference_protected( - (struct kasan_rcu_info __rcu *)ptr, NULL); - - KUNIT_EXPECT_KASAN_FAIL(test, - call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); - rcu_barrier()); -} - -static void workqueue_uaf_work(struct work_struct *work) -{ - kfree(work); -} - -static void workqueue_uaf(struct kunit *test) -{ - struct workqueue_struct *workqueue; - struct work_struct *work; - - workqueue = create_workqueue("kasan_workqueue_test"); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue); - - work = kmalloc(sizeof(struct work_struct), GFP_KERNEL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work); - - INIT_WORK(work, workqueue_uaf_work); - queue_work(workqueue, work); - destroy_workqueue(workqueue); - - KUNIT_EXPECT_KASAN_FAIL(test, - ((volatile struct work_struct *)work)->data); -} - static void vmalloc_helpers_tags(struct kunit *test) { void *ptr; @@ -1829,12 +1829,12 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_oob_right), KUNIT_CASE(kmalloc_oob_left), KUNIT_CASE(kmalloc_node_oob_right), + KUNIT_CASE(kmalloc_big_oob_right), KUNIT_CASE(kmalloc_large_oob_right), KUNIT_CASE(kmalloc_large_uaf), KUNIT_CASE(kmalloc_large_invalid_free), KUNIT_CASE(page_alloc_oob_right), KUNIT_CASE(page_alloc_uaf), - KUNIT_CASE(kmalloc_big_oob_right), KUNIT_CASE(krealloc_more_oob), KUNIT_CASE(krealloc_less_oob), KUNIT_CASE(krealloc_large_more_oob), @@ -1853,9 +1853,17 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_uaf_memset), KUNIT_CASE(kmalloc_uaf2), KUNIT_CASE(kmalloc_uaf3), + KUNIT_CASE(kmalloc_double_kzfree), + KUNIT_CASE(ksize_unpoisons_memory), + KUNIT_CASE(ksize_uaf), + KUNIT_CASE(rcu_uaf), + KUNIT_CASE(workqueue_uaf), KUNIT_CASE(kfree_via_page), KUNIT_CASE(kfree_via_phys), KUNIT_CASE(kmem_cache_oob), + KUNIT_CASE(kmem_cache_double_free), + KUNIT_CASE(kmem_cache_invalid_free), + KUNIT_CASE(kmem_cache_double_destroy), KUNIT_CASE(kmem_cache_accounted), KUNIT_CASE(kmem_cache_bulk), KUNIT_CASE(mempool_kmalloc_oob_right), @@ -1875,19 +1883,11 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kasan_stack_oob), KUNIT_CASE(kasan_alloca_oob_left), 
KUNIT_CASE(kasan_alloca_oob_right), - KUNIT_CASE(ksize_unpoisons_memory), - KUNIT_CASE(ksize_uaf), - KUNIT_CASE(kmem_cache_double_free), - KUNIT_CASE(kmem_cache_invalid_free), - KUNIT_CASE(kmem_cache_double_destroy), KUNIT_CASE(kasan_memchr), KUNIT_CASE(kasan_memcmp), KUNIT_CASE(kasan_strings), KUNIT_CASE(kasan_bitops_generic), KUNIT_CASE(kasan_bitops_tags), - KUNIT_CASE(kmalloc_double_kzfree), - KUNIT_CASE(rcu_uaf), - KUNIT_CASE(workqueue_uaf), KUNIT_CASE(vmalloc_helpers_tags), KUNIT_CASE(vmalloc_oob), KUNIT_CASE(vmap_tags), -- cgit v1.2.3 From 1ce9a0523938f87dd8505233cc3445f8e2d8dcee Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 23:29:03 +0100 Subject: kasan: rename and document kasan_(un)poison_object_data Rename kasan_unpoison_object_data to kasan_unpoison_new_object and add a documentation comment. Do the same for kasan_poison_object_data. The new names and the comments should suggest the users that these hooks are intended for internal use by the slab allocator. The following patch will remove non-slab-internal uses of these hooks. No functional changes. [andreyknvl@google.com: update references to renamed functions in comments] Link: https://lkml.kernel.org/r/20231221180637.105098-1-andrey.konovalov@linux.dev Link: https://lkml.kernel.org/r/eab156ebbd635f9635ef67d1a4271f716994e628.1703024586.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Lobakin Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Breno Leitao Cc: Dmitry Vyukov Cc: Evgenii Stepanov Signed-off-by: Andrew Morton --- include/linux/kasan.h | 35 +++++++++++++++++++++++++++-------- mm/kasan/common.c | 4 ++-- mm/kasan/shadow.c | 4 ++-- mm/slab.c | 10 ++++------ mm/slub.c | 4 ++-- net/core/skbuff.c | 8 ++++---- 6 files changed, 41 insertions(+), 24 deletions(-) (limited to 'mm/kasan') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7392c5d89b92..d49e3d4c099e 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -129,20 +129,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab) __kasan_poison_slab(slab); } -void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache, +void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object); +/** + * kasan_unpoison_new_object - Temporarily unpoison a new slab object. + * @cache: Cache the object belong to. + * @object: Pointer to the object. + * + * This function is intended for the slab allocator's internal use. It + * temporarily unpoisons an object from a newly allocated slab without doing + * anything else. The object must later be repoisoned by + * kasan_poison_new_object(). + */ +static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) - __kasan_unpoison_object_data(cache, object); + __kasan_unpoison_new_object(cache, object); } -void __kasan_poison_object_data(struct kmem_cache *cache, void *object); -static __always_inline void kasan_poison_object_data(struct kmem_cache *cache, +void __kasan_poison_new_object(struct kmem_cache *cache, void *object); +/** + * kasan_unpoison_new_object - Repoison a new slab object. + * @cache: Cache the object belong to. + * @object: Pointer to the object. + * + * This function is intended for the slab allocator's internal use. It + * repoisons an object that was previously unpoisoned by + * kasan_unpoison_new_object() without doing anything else. 
+ */ +static __always_inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) { if (kasan_enabled()) - __kasan_poison_object_data(cache, object); + __kasan_poison_new_object(cache, object); } void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache, @@ -342,9 +361,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order, return false; } static inline void kasan_poison_slab(struct slab *slab) {} -static inline void kasan_unpoison_object_data(struct kmem_cache *cache, +static inline void kasan_unpoison_new_object(struct kmem_cache *cache, void *object) {} -static inline void kasan_poison_object_data(struct kmem_cache *cache, +static inline void kasan_poison_new_object(struct kmem_cache *cache, void *object) {} static inline void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) diff --git a/mm/kasan/common.c b/mm/kasan/common.c index bf16c2dfa8e7..f4255e807b74 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -143,12 +143,12 @@ void __kasan_poison_slab(struct slab *slab) KASAN_SLAB_REDZONE, false); } -void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object) +void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object) { kasan_unpoison(object, cache->object_size, false); } -void __kasan_poison_object_data(struct kmem_cache *cache, void *object) +void __kasan_poison_new_object(struct kmem_cache *cache, void *object) { kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE), KASAN_SLAB_REDZONE, false); diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index d687f09a7ae3..0154d200be40 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -130,7 +130,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init) /* * Perform shadow offset calculation based on untagged address, as - * some of the callers (e.g. kasan_poison_object_data) pass tagged + * some of the callers (e.g. kasan_poison_new_object) pass tagged * addresses to this function. */ addr = kasan_reset_tag(addr); @@ -170,7 +170,7 @@ void kasan_unpoison(const void *addr, size_t size, bool init) /* * Perform shadow offset calculation based on untagged address, as - * some of the callers (e.g. kasan_unpoison_object_data) pass tagged + * some of the callers (e.g. kasan_unpoison_new_object) pass tagged * addresses to this function. */ addr = kasan_reset_tag(addr); diff --git a/mm/slab.c b/mm/slab.c index 9ad3d0f2d1a5..773c79e153f3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2327,11 +2327,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) * They must also be threaded. 
*/ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { - kasan_unpoison_object_data(cachep, - objp + obj_offset(cachep)); + kasan_unpoison_new_object(cachep, objp + obj_offset(cachep)); cachep->ctor(objp + obj_offset(cachep)); - kasan_poison_object_data( - cachep, objp + obj_offset(cachep)); + kasan_poison_new_object(cachep, objp + obj_offset(cachep)); } if (cachep->flags & SLAB_RED_ZONE) { @@ -2472,9 +2470,9 @@ static void cache_init_objs(struct kmem_cache *cachep, /* constructor could break poison info */ if (DEBUG == 0 && cachep->ctor) { - kasan_unpoison_object_data(cachep, objp); + kasan_unpoison_new_object(cachep, objp); cachep->ctor(objp); - kasan_poison_object_data(cachep, objp); + kasan_poison_new_object(cachep, objp); } if (!shuffled) diff --git a/mm/slub.c b/mm/slub.c index 782bd8a6bd34..891742e5932a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1860,9 +1860,9 @@ static void *setup_object(struct kmem_cache *s, void *object) setup_object_debug(s, object); object = kasan_init_slab_obj(s, object); if (unlikely(s->ctor)) { - kasan_unpoison_object_data(s, object); + kasan_unpoison_new_object(s, object); s->ctor(object); - kasan_poison_object_data(s, object); + kasan_poison_new_object(s, object); } return object; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b157efea5dea..63bb6526399d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -337,7 +337,7 @@ static struct sk_buff *napi_skb_cache_get(void) } skb = nc->skb_cache[--nc->skb_count]; - kasan_unpoison_object_data(skbuff_cache, skb); + kasan_unpoison_new_object(skbuff_cache, skb); return skb; } @@ -1309,13 +1309,13 @@ static void napi_skb_cache_put(struct sk_buff *skb) struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); u32 i; - kasan_poison_object_data(skbuff_cache, skb); + kasan_poison_new_object(skbuff_cache, skb); nc->skb_cache[nc->skb_count++] = skb; if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) - kasan_unpoison_object_data(skbuff_cache, - nc->skb_cache[i]); + kasan_unpoison_new_object(skbuff_cache, + nc->skb_cache[i]); kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF, nc->skb_cache + NAPI_SKB_CACHE_HALF); -- cgit v1.2.3 From a414d4286f3400aa05631c4931eb3feba83e29e8 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 22:19:51 +0100 Subject: kasan: handle concurrent kasan_record_aux_stack calls kasan_record_aux_stack can be called concurrently on the same object. This might lead to a race condition when rotating the saved aux stack trace handles, which in turns leads to incorrect accounting of stack depot handles and refcount underflows in the stack depot code. Fix by introducing a raw spinlock to protect the aux stack trace handles in kasan_record_aux_stack. 
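For illustration, one interleaving that corrupts the handle accounting (a sketch only; the rotation shown matches the pre-patch code in the diff below, but the exact sequence of events depends on timing):

	/*
	 * Unserialized rotation, as done before this patch:
	 *
	 *	stack_depot_put(alloc_meta->aux_stack[1]);
	 *	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	 *	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
	 *
	 * Two concurrent callers recording an aux stack for the same object,
	 * both observing aux_stack[1] == H1:
	 *
	 *	CPU0: stack_depot_put(H1);
	 *	CPU1: stack_depot_put(H1);	<- H1 is put twice: refcount underflow
	 *	CPU0: aux_stack[0] = H2;
	 *	CPU1: aux_stack[0] = H3;	<- the reference to H2 is never put
	 *
	 * Taking aux_lock around the rotation prevents both outcomes.
	 */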
Link: https://lkml.kernel.org/r/1606b960e2f746862d1f459515972f9695bf448a.1703020707.git.andreyknvl@google.com Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") Signed-off-by: Andrey Konovalov Reported-by: Tetsuo Handa Reported-by: syzbot+186b55175d8360728234@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000784b1c060b0074a2@google.com/ Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 32 +++++++++++++++++++++++++++++--- mm/kasan/kasan.h | 8 ++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 54e20b2bc3e1..55e6b5db2cae 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -471,8 +472,18 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object) struct kasan_free_meta *free_meta; alloc_meta = kasan_get_alloc_meta(cache, object); - if (alloc_meta) + if (alloc_meta) { __memset(alloc_meta, 0, sizeof(*alloc_meta)); + + /* + * Temporarily disable KASAN bug reporting to allow instrumented + * raw_spin_lock_init to access aux_lock, which resides inside + * of a redzone. + */ + kasan_disable_current(); + raw_spin_lock_init(&alloc_meta->aux_lock); + kasan_enable_current(); + } free_meta = kasan_get_free_meta(cache, object); if (free_meta) __memset(free_meta, 0, sizeof(*free_meta)); @@ -502,6 +513,8 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) struct kmem_cache *cache; struct kasan_alloc_meta *alloc_meta; void *object; + depot_stack_handle_t new_handle, old_handle; + unsigned long flags; if (is_kfence_address(addr) || !slab) return; @@ -512,9 +525,22 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) if (!alloc_meta) return; - stack_depot_put(alloc_meta->aux_stack[1]); + new_handle = kasan_save_stack(0, depot_flags); + + /* + * Temporarily disable KASAN bug reporting to allow instrumented + * spinlock functions to access aux_lock, which resides inside of a + * redzone. + */ + kasan_disable_current(); + raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags); + old_handle = alloc_meta->aux_stack[1]; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); + alloc_meta->aux_stack[0] = new_handle; + raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags); + kasan_enable_current(); + + stack_depot_put(old_handle); } void kasan_record_aux_stack(void *addr) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 5e298e3ac909..69e4f5e58e33 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) @@ -249,6 +250,13 @@ struct kasan_global { struct kasan_alloc_meta { struct kasan_track alloc_track; /* Free track is stored in kasan_free_meta. */ + /* + * aux_lock protects aux_stack from accesses from concurrent + * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping + * on RT kernels, as kasan_record_aux_stack_noalloc can be called from + * non-sleepable contexts. 
+ */ + raw_spinlock_t aux_lock; depot_stack_handle_t aux_stack[2]; }; -- cgit v1.2.3 From 08d7c94d9635cf3fdffcab5f066d857efbad9507 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 19 Dec 2023 22:19:52 +0100 Subject: kasan: memset free track in qlink_free Instead of only zeroing out the stack depot handle when evicting the free stack trace in qlink_free, zero out the whole track. Do this just to produce a similar effect for alloc and free meta. The other fields of the free track besides the stack trace handle are considered invalid at this point anyway, so no harm in zeroing them out. Link: https://lkml.kernel.org/r/db987c1cd011547e85353b0b9997de190c97e3e6.1703020707.git.andreyknvl@google.com Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Tetsuo Handa Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/quarantine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/kasan') diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 265ca2bbe2dd..782e045da911 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -157,7 +157,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) if (free_meta && *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) { stack_depot_put(free_meta->free_track.stack); - free_meta->free_track.stack = 0; + __memset(&free_meta->free_track, 0, sizeof(free_meta->free_track)); } /* -- cgit v1.2.3 From c20e3feadd4505c46a87dcabef5b129a97992466 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:45 +0100 Subject: kasan: improve kasan_non_canonical_hook Make kasan_non_canonical_hook to be more sure in its report (i.e. say "probably" instead of "maybe") if the address belongs to the shadow memory region for kernel addresses. Also use the kasan_shadow_to_mem helper to calculate the original address. Also improve the comments in kasan_non_canonical_hook. Link: https://lkml.kernel.org/r/af94ef3cb26f8c065048b3158d9f20f6102bfaaa.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 6 ++++++ mm/kasan/report.c | 34 ++++++++++++++++++++-------------- 2 files changed, 26 insertions(+), 14 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 69e4f5e58e33..0e209b823b2c 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -307,6 +307,12 @@ struct kasan_stack_ring { #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) +static __always_inline bool addr_in_shadow(const void *addr) +{ + return addr >= (void *)KASAN_SHADOW_START && + addr < (void *)KASAN_SHADOW_END; +} + #ifndef kasan_shadow_to_mem static inline const void *kasan_shadow_to_mem(const void *shadow_addr) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index a938237f6882..4bc7ac9fb37d 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -635,37 +635,43 @@ void kasan_report_async(void) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) /* - * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high - * canonical half of the address space) cause out-of-bounds shadow memory reads - * before the actual access. 
For addresses in the low canonical half of the - * address space, as well as most non-canonical addresses, that out-of-bounds - * shadow memory access lands in the non-canonical part of the address space. - * Help the user figure out what the original bogus pointer was. + * With compiler-based KASAN modes, accesses to bogus pointers (outside of the + * mapped kernel address space regions) cause faults when KASAN tries to check + * the shadow memory before the actual memory access. This results in cryptic + * GPF reports, which are hard for users to interpret. This hook helps users to + * figure out what the original bogus pointer was. */ void kasan_non_canonical_hook(unsigned long addr) { unsigned long orig_addr; const char *bug_type; + /* + * All addresses that came as a result of the memory-to-shadow mapping + * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET. + */ if (addr < KASAN_SHADOW_OFFSET) return; - orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT; + orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr); + /* * For faults near the shadow address for NULL, we can be fairly certain * that this is a KASAN shadow memory access. - * For faults that correspond to shadow for low canonical addresses, we - * can still be pretty sure - that shadow region is a fairly narrow - * chunk of the non-canonical address space. - * But faults that look like shadow for non-canonical addresses are a - * really large chunk of the address space. In that case, we still - * print the decoded address, but make it clear that this is not - * necessarily what's actually going on. + * For faults that correspond to the shadow for low or high canonical + * addresses, we can still be pretty sure: these shadow regions are a + * fairly narrow chunk of the address space. + * But the shadow for non-canonical addresses is a really large chunk + * of the address space. For this case, we still print the decoded + * address, but make it clear that this is not necessarily what's + * actually going on. */ if (orig_addr < PAGE_SIZE) bug_type = "null-ptr-deref"; else if (orig_addr < TASK_SIZE) bug_type = "probably user-memory-access"; + else if (addr_in_shadow((void *)addr)) + bug_type = "probably wild-memory-access"; else bug_type = "maybe wild-memory-access"; pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type, -- cgit v1.2.3 From 3067b919ed81e17b8b8fb932c43f200775b1d545 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:46 +0100 Subject: kasan: clean up kasan_requires_meta Currently, for Generic KASAN mode, kasan_requires_meta is defined to return kasan_stack_collection_enabled. Even though the Generic mode does not support disabling stack trace collection, kasan_requires_meta was implemented in this way to make it easier to implement the disabling for the Generic mode in the future. However, for the Generic mode, the per-object metadata also stores the quarantine link. So even if disabling stack collection is implemented, the per-object metadata will still be required. Fix kasan_requires_meta to return true for the Generic mode and update the related comments. This change does not fix any observable bugs but rather just brings the code to a cleaner state. 
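For illustration, a stripped-down sketch of why Generic mode can never drop per-object metadata: the quarantine link lives inside the free meta. The typedefs and field layout below are stand-ins written as standalone C, not the actual kernel definitions; only the overall shape matters.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel types; illustrative only. */
typedef uint32_t depot_stack_handle_t;
struct qlist_node { struct qlist_node *next; };
struct kasan_track { int pid; depot_stack_handle_t stack; };

/*
 * The free meta holds the quarantine link, so Generic KASAN needs the
 * per-object metadata even if stack trace collection were ever disabled.
 */
struct kasan_free_meta {
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
};

static inline bool kasan_requires_meta(void)
{
	return true;	/* Generic mode: metadata is always required. */
}
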
Link: https://lkml.kernel.org/r/8086623407095ac1c82377a2107dcc5845f99cfa.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 0e209b823b2c..38af25b9c89c 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -101,21 +101,21 @@ static inline bool kasan_sample_page_alloc(unsigned int order) #ifdef CONFIG_KASAN_GENERIC -/* Generic KASAN uses per-object metadata to store stack traces. */ +/* + * Generic KASAN uses per-object metadata to store alloc and free stack traces + * and the quarantine link. + */ static inline bool kasan_requires_meta(void) { - /* - * Technically, Generic KASAN always collects stack traces right now. - * However, let's use kasan_stack_collection_enabled() in case the - * kasan.stacktrace command-line argument is changed to affect - * Generic KASAN. - */ - return kasan_stack_collection_enabled(); + return true; } #else /* CONFIG_KASAN_GENERIC */ -/* Tag-based KASAN modes do not use per-object metadata. */ +/* + * Tag-based KASAN modes do not use per-object metadata: they use the stack + * ring to store alloc and free stack traces and do not use qurantine. + */ static inline bool kasan_requires_meta(void) { return false; -- cgit v1.2.3 From 1a55836a1b002298714dc84032b3d19d9bbc2d66 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:47 +0100 Subject: kasan: update kasan_poison documentation comment The comment for kasan_poison says that the size argument gets aligned by the function to KASAN_GRANULE_SIZE, which is wrong: the argument must be already aligned when it is passed to the function. Remove the invalid part of the comment. Link: https://lkml.kernel.org/r/992a302542059fc40d86ea560eac413ecb31b6a1.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 38af25b9c89c..1c34511090d7 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -513,8 +513,6 @@ static inline bool kasan_byte_accessible(const void *addr) * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range * @init - whether to initialize the memory range (only for hardware tag-based) - * - * The size gets aligned to KASAN_GRANULE_SIZE before marking the range. */ void kasan_poison(const void *addr, size_t size, u8 value, bool init); -- cgit v1.2.3 From 99f3fe416c71aa3d5aba69174c274309ededfd42 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:48 +0100 Subject: kasan: clean up is_kfence_address checks 1. Do not untag addresses that are passed to is_kfence_address: it tolerates tagged addresses. 2. Move is_kfence_address checks from internal KASAN functions (kasan_poison/unpoison, etc.) to external-facing ones. Note that kasan_poison/unpoison are never called outside of KASAN/slab code anymore; the comment is wrong, so drop it. 3. Simplify/reorganize the code around the updated checks. 
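As a rough sketch of the resulting pattern (standalone C with stubbed-out helpers, not the exact kernel code): the KFENCE check now happens once at an external-facing entry point such as __kasan_unpoison_range, and the internal helper it calls no longer repeats it.

#include <stdbool.h>
#include <stddef.h>

/* Stubs standing in for KFENCE and the internal KASAN helper. */
static bool is_kfence_address(const void *addr) { (void)addr; return false; }

static void kasan_unpoison(const void *addr, size_t size, bool init)
{
	/* Internal helper: no is_kfence_address() check needed here anymore. */
	(void)addr; (void)size; (void)init;
}

/* External-facing entry point: filter out KFENCE-managed memory up front. */
void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;
	kasan_unpoison(address, size, false);
}
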
Link: https://lkml.kernel.org/r/1065732315ef4e141b6177d8f612232d4d5bc0ab.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 26 +++++++++++++++++--------- mm/kasan/kasan.h | 16 ++-------------- mm/kasan/shadow.c | 12 ------------ 3 files changed, 19 insertions(+), 35 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index f4255e807b74..86adf80cc11a 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -79,6 +79,9 @@ EXPORT_SYMBOL(kasan_disable_current); void __kasan_unpoison_range(const void *address, size_t size) { + if (is_kfence_address(address)) + return; + kasan_unpoison(address, size, false); } @@ -218,9 +221,6 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, tagged_object = object; object = kasan_reset_tag(object); - if (is_kfence_address(object)) - return false; - if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) { kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE); return true; @@ -247,7 +247,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) { - bool buggy_object = poison_slab_object(cache, object, ip, init); + bool buggy_object; + + if (is_kfence_address(object)) + return false; + + buggy_object = poison_slab_object(cache, object, ip, init); return buggy_object ? true : kasan_quarantine_put(cache, object); } @@ -359,7 +364,7 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object if (unlikely(object == NULL)) return NULL; - if (is_kfence_address(kasan_reset_tag(object))) + if (is_kfence_address(object)) return (void *)object; /* The object has already been unpoisoned by kasan_slab_alloc(). */ @@ -417,7 +422,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag if (unlikely(object == ZERO_SIZE_PTR)) return (void *)object; - if (is_kfence_address(kasan_reset_tag(object))) + if (is_kfence_address(object)) return (void *)object; /* @@ -483,6 +488,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip) return true; } + if (is_kfence_address(ptr)) + return false; + slab = folio_slab(folio); return !poison_slab_object(slab->slab_cache, ptr, ip, false); } @@ -492,9 +500,6 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) struct slab *slab; gfp_t flags = 0; /* Might be executing under a lock. */ - if (is_kfence_address(kasan_reset_tag(ptr))) - return; - slab = virt_to_slab(ptr); /* @@ -507,6 +512,9 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip) return; } + if (is_kfence_address(ptr)) + return; + /* Unpoison the object and save alloc info for non-kmalloc() allocations. */ unpoison_slab_object(slab->slab_cache, ptr, size, flags); diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 1c34511090d7..5fbcc1b805bc 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -466,35 +466,23 @@ static inline u8 kasan_random_tag(void) { return 0; } static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init) { - addr = kasan_reset_tag(addr); - - /* Skip KFENCE memory if called explicitly outside of sl*b. 
*/ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; if (WARN_ON(size & KASAN_GRANULE_MASK)) return; - hw_set_mem_tag_range((void *)addr, size, value, init); + hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init); } static inline void kasan_unpoison(const void *addr, size_t size, bool init) { u8 tag = get_tag(addr); - addr = kasan_reset_tag(addr); - - /* Skip KFENCE memory if called explicitly outside of sl*b. */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; size = round_up(size, KASAN_GRANULE_SIZE); - hw_set_mem_tag_range((void *)addr, size, tag, init); + hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init); } static inline bool kasan_byte_accessible(const void *addr) diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 0154d200be40..30625303d01a 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -135,10 +135,6 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init) */ addr = kasan_reset_tag(addr); - /* Skip KFENCE memory if called explicitly outside of sl*b. */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; if (WARN_ON(size & KASAN_GRANULE_MASK)) @@ -175,14 +171,6 @@ void kasan_unpoison(const void *addr, size_t size, bool init) */ addr = kasan_reset_tag(addr); - /* - * Skip KFENCE memory if called explicitly outside of sl*b. Also note - * that calls to ksize(), where size is not a multiple of machine-word - * size, would otherwise poison the invalid portion of the word. - */ - if (is_kfence_address(addr)) - return; - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK)) return; -- cgit v1.2.3 From 58ee788cb23738abd57d6327d0b5096df5a53d31 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:49 +0100 Subject: kasan: respect CONFIG_KASAN_VMALLOC for kasan_flag_vmalloc Never enable the kasan_flag_vmalloc static branch unless CONFIG_KASAN_VMALLOC is enabled. This does not fix any observable bugs (vmalloc annotations for the HW_TAGS mode are no-op with CONFIG_KASAN_VMALLOC disabled) but rather just cleans up the code. Link: https://lkml.kernel.org/r/3e5c933c8f6b59bd587efb05c407964be951772c.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/hw_tags.c | 7 +++++++ mm/kasan/kasan.h | 1 + 2 files changed, 8 insertions(+) (limited to 'mm/kasan') diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 06141bbc1e51..80f11a3eccd5 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -57,7 +57,11 @@ enum kasan_mode kasan_mode __ro_after_init; EXPORT_SYMBOL_GPL(kasan_mode); /* Whether to enable vmalloc tagging. 
*/ +#ifdef CONFIG_KASAN_VMALLOC DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc); +#else +DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc); +#endif #define PAGE_ALLOC_SAMPLE_DEFAULT 1 #define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3 @@ -119,6 +123,9 @@ static int __init early_kasan_flag_vmalloc(char *arg) if (!arg) return -EINVAL; + if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) + return 0; + if (!strcmp(arg, "off")) kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF; else if (!strcmp(arg, "on")) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 5fbcc1b805bc..dee105ba32dd 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -49,6 +49,7 @@ DECLARE_PER_CPU(long, kasan_page_alloc_skip); static inline bool kasan_vmalloc_enabled(void) { + /* Static branch is never enabled with CONFIG_KASAN_VMALLOC disabled. */ return static_branch_likely(&kasan_flag_vmalloc); } -- cgit v1.2.3 From 14c99b990cccd924ae6101bd36b85f8456eabb10 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:50 +0100 Subject: kasan: check kasan_vmalloc_enabled in vmalloc tests Check that vmalloc poisoning is not disabled via command line when running the vmalloc-related KASAN tests. Skip the tests otherwise. Link: https://lkml.kernel.org/r/954456e50ac98519910c3e24a479a18eae62f8dd.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/hw_tags.c | 1 + mm/kasan/kasan.h | 5 +++++ mm/kasan/kasan_test.c | 11 ++++++++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'mm/kasan') diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 80f11a3eccd5..2b994092a2d4 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -62,6 +62,7 @@ DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc); #else DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc); #endif +EXPORT_SYMBOL_GPL(kasan_flag_vmalloc); #define PAGE_ALLOC_SAMPLE_DEFAULT 1 #define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3 diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index dee105ba32dd..acc1a9410f0d 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -83,6 +83,11 @@ static inline bool kasan_sample_page_alloc(unsigned int order) #else /* CONFIG_KASAN_HW_TAGS */ +static inline bool kasan_vmalloc_enabled(void) +{ + return IS_ENABLED(CONFIG_KASAN_VMALLOC); +} + static inline bool kasan_async_fault_possible(void) { return false; diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index aa994b62378b..9b1024a6e580 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -1552,6 +1552,9 @@ static void vmalloc_helpers_tags(struct kunit *test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + ptr = vmalloc(PAGE_SIZE); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -1586,6 +1589,9 @@ static void vmalloc_oob(struct kunit *test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + v_ptr = vmalloc(size); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr); @@ -1639,6 +1645,9 @@ static void vmap_tags(struct kunit *test) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); + if (!kasan_vmalloc_enabled()) + kunit_skip(test, "Test requires kasan.vmalloc=on"); + p_page = alloc_pages(GFP_KERNEL, 1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page); p_ptr = page_address(p_page); @@ -1757,7 +1766,7 @@ static void match_all_not_assigned(struct kunit *test) free_pages((unsigned long)ptr, order); } - 
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) + if (!kasan_vmalloc_enabled()) return; for (i = 0; i < 256; i++) { -- cgit v1.2.3 From f2fffc0cfcfa9ed2ed7448705a5e187cabee6af4 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:51 +0100 Subject: kasan: export kasan_poison as GPL KASAN uses EXPORT_SYMBOL_GPL for symbols whose exporting is only required for KASAN tests when they are built as a module. kasan_poison is one of those symbols, so export it as GPL. Link: https://lkml.kernel.org/r/171d0b8b2e807d04cca74f973830f9b169e06fb8.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/shadow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/kasan') diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 30625303d01a..9ef84f31833f 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -145,7 +145,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init) __memset(shadow_start, value, shadow_end - shadow_start); } -EXPORT_SYMBOL(kasan_poison); +EXPORT_SYMBOL_GPL(kasan_poison); #ifdef CONFIG_KASAN_GENERIC void kasan_poison_last_granule(const void *addr, size_t size) -- cgit v1.2.3 From 3ab9304db6ab13a8d4b5f99ef0bbd9302f26c741 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:52 +0100 Subject: kasan: remove SLUB checks for page_alloc fallbacks in tests A number of KASAN tests rely on the fact that calling kmalloc with a size larger than an order-1 page falls back onto page_alloc. This fallback was originally only implemented for SLUB, but since commit d6a71648dbc0 ("mm/slab: kmalloc: pass requests larger than order-1 page to page allocator"), it is also implemented for SLAB. Thus, drop the SLUB checks from the tests. Link: https://lkml.kernel.org/r/c82099b6fb365b6f4c2c21b112d4abb4dfd83e53.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 9b1024a6e580..691c15fc7cdb 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -215,7 +215,7 @@ static void kmalloc_node_oob_right(struct kunit *test) /* * Check that KASAN detects an out-of-bounds access for a big object allocated - * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB. + * via kmalloc(). But not as big as to trigger the page_alloc fallback. */ static void kmalloc_big_oob_right(struct kunit *test) { @@ -233,8 +233,7 @@ static void kmalloc_big_oob_right(struct kunit *test) /* * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk * that does not fit into the largest slab cache and therefore is allocated via - * the page_alloc fallback for SLUB. SLAB has no such fallback, and thus these - * tests are not supported for it. + * the page_alloc fallback.
*/ static void kmalloc_large_oob_right(struct kunit *test) @@ -242,8 +241,6 @@ static void kmalloc_large_oob_right(struct kunit *test) char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -258,8 +255,6 @@ static void kmalloc_large_uaf(struct kunit *test) char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); kfree(ptr); @@ -272,8 +267,6 @@ static void kmalloc_large_invalid_free(struct kunit *test) char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -407,18 +400,12 @@ static void krealloc_less_oob(struct kunit *test) static void krealloc_large_more_oob(struct kunit *test) { - /* page_alloc fallback is only implemented for SLUB. */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201, KMALLOC_MAX_CACHE_SIZE + 235); } static void krealloc_large_less_oob(struct kunit *test) { - /* page_alloc fallback is only implemented for SLUB. */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235, KMALLOC_MAX_CACHE_SIZE + 201); } @@ -1156,9 +1143,6 @@ static void mempool_kmalloc_large_uaf(struct kunit *test) size_t size = KMALLOC_MAX_CACHE_SIZE + 1; void *extra_elem; - /* page_alloc fallback is only implemented for SLUB. */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - extra_elem = mempool_prepare_kmalloc(test, &pool, size); mempool_uaf_helper(test, &pool, false); @@ -1227,9 +1211,6 @@ static void mempool_kmalloc_large_double_free(struct kunit *test) size_t size = KMALLOC_MAX_CACHE_SIZE + 1; char *extra_elem; - /* page_alloc fallback is only implemented for SLUB. */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - extra_elem = mempool_prepare_kmalloc(test, &pool, size); mempool_double_free_helper(test, &pool); @@ -1284,9 +1265,6 @@ static void mempool_kmalloc_large_invalid_free(struct kunit *test) size_t size = KMALLOC_MAX_CACHE_SIZE + 1; char *extra_elem; - /* page_alloc fallback is only implemented for SLUB. */ - KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); - extra_elem = mempool_prepare_kmalloc(test, &pool, size); mempool_kmalloc_invalid_free_helper(test, &pool); -- cgit v1.2.3 From 4e397274e10beb385cee1a04c7850c1c46ae5d3e Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 21:04:53 +0100 Subject: kasan: speed up match_all_mem_tag test for SW_TAGS Checking all 256 possible tag values in the match_all_mem_tag KASAN test is slow and produces 256 reports. Instead, just check the first 8 and the last 8. Link: https://lkml.kernel.org/r/6fe51262defd80cdc1150c42404977aafd1b6167.1703188911.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 691c15fc7cdb..971cfff4ca0b 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -1797,6 +1797,14 @@ static void match_all_mem_tag(struct kunit *test) /* For each possible tag value not matching the pointer tag. 
*/ for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) { + /* + * For Software Tag-Based KASAN, skip the majority of tag + * values to avoid the test printing too many reports. + */ + if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) && + tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8) + continue; + if (tag == get_tag(ptr)) continue; -- cgit v1.2.3 From f6940e8adc64f584dcfb2960f6e4b6a54ea9c508 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:37 +0100 Subject: kasan: clean up kasan_cache_create Reorganize the code to avoid nested if/else checks to improve the readability. Also drop the confusing comments about KMALLOC_MAX_SIZE checks: they are relevant for both SLUB and SLAB (originally, the comments likely confused KMALLOC_MAX_SIZE with KMALLOC_MAX_CACHE_SIZE). Link: https://lkml.kernel.org/r/20231221183540.168428-1-andrey.konovalov@linux.dev Fixes: a5989d4ed40c ("kasan: improve free meta storage in Generic KASAN") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 67 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 28 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 55e6b5db2cae..5b3308127ce0 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -382,16 +382,11 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, ok_size = *size; - /* Add alloc meta into redzone. */ + /* Add alloc meta into the redzone. */ cache->kasan_info.alloc_meta_offset = *size; *size += sizeof(struct kasan_alloc_meta); - /* - * If alloc meta doesn't fit, don't add it. - * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal - * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for - * larger sizes. - */ + /* If alloc meta doesn't fit, don't add it. */ if (*size > KMALLOC_MAX_SIZE) { cache->kasan_info.alloc_meta_offset = 0; *size = ok_size; @@ -402,36 +397,52 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset; /* - * Add free meta into redzone when it's not possible to store + * Store free meta in the redzone when it's not possible to store * it in the object. This is the case when: * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can * be touched after it was freed, or * 2. Object has a constructor, which means it's expected to - * retain its content until the next allocation, or - * 3. Object is too small and SLUB DEBUG is enabled. Avoid - * free meta that exceeds the object size corrupts the - * SLUB DEBUG metadata. - * Otherwise cache->kasan_info.free_meta_offset = 0 is implied. - * If the object is smaller than the free meta and SLUB DEBUG - * is not enabled, it is still possible to store part of the - * free meta in the object. + * retain its content until the next allocation. 
*/ if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); - } else if (cache->object_size < sizeof(struct kasan_free_meta)) { - if (__slub_debug_enabled()) { - cache->kasan_info.free_meta_offset = *size; - *size += sizeof(struct kasan_free_meta); - } else { - rem_free_meta_size = sizeof(struct kasan_free_meta) - - cache->object_size; - *size += rem_free_meta_size; - if (cache->kasan_info.alloc_meta_offset != 0) - cache->kasan_info.alloc_meta_offset += rem_free_meta_size; - } + goto free_meta_added; + } + + /* + * Otherwise, if the object is large enough to contain free meta, + * store it within the object. + */ + if (sizeof(struct kasan_free_meta) <= cache->object_size) { + /* cache->kasan_info.free_meta_offset = 0 is implied. */ + goto free_meta_added; } + /* + * For smaller objects, store the beginning of free meta within the + * object and the end in the redzone. And thus shift the location of + * alloc meta to free up space for free meta. + * This is only possible when slub_debug is disabled, as otherwise + * the end of free meta will overlap with slub_debug metadata. + */ + if (!__slub_debug_enabled()) { + rem_free_meta_size = sizeof(struct kasan_free_meta) - + cache->object_size; + *size += rem_free_meta_size; + if (cache->kasan_info.alloc_meta_offset != 0) + cache->kasan_info.alloc_meta_offset += rem_free_meta_size; + goto free_meta_added; + } + + /* + * If the object is small and slub_debug is enabled, store free meta + * in the redzone after alloc meta. + */ + cache->kasan_info.free_meta_offset = *size; + *size += sizeof(struct kasan_free_meta); + +free_meta_added: /* If free meta doesn't fit, don't add it. */ if (*size > KMALLOC_MAX_SIZE) { cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META; @@ -441,7 +452,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, /* Calculate size with optimal redzone. */ optimal_size = cache->object_size + optimal_redzone(cache->object_size); - /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */ + /* Limit it with KMALLOC_MAX_SIZE. */ if (optimal_size > KMALLOC_MAX_SIZE) optimal_size = KMALLOC_MAX_SIZE; /* Use optimal size if the size with added metas is not large enough. */ -- cgit v1.2.3 From 04afc540e58e8f66fd85b0b88af3e8ce286be17c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:38 +0100 Subject: kasan: reuse kasan_track in kasan_stack_ring_entry Avoid duplicating fields of kasan_track in kasan_stack_ring_entry: reuse the structure. 
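In outline, the change replaces the duplicated fields with an embedded struct. The layout below is inferred from the removed fields and the CONFIG_KASAN_EXTRA_INFO usage in the diffs that follow; the typedefs are stand-ins, not the real kernel definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t depot_stack_handle_t;	/* stand-in typedef */

/* The record of one alloc/free event, as already used for per-object meta. */
struct kasan_track {
	uint32_t pid;
	depot_stack_handle_t stack;
#ifdef CONFIG_KASAN_EXTRA_INFO
	uint64_t cpu:20;
	uint64_t timestamp:44;
#endif
};

/* A stack ring entry now embeds kasan_track instead of duplicating it. */
struct kasan_stack_ring_entry {
	void *ptr;
	size_t size;
	struct kasan_track track;	/* was: pid, stack, cpu, timestamp */
	bool is_free;
};
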
Link: https://lkml.kernel.org/r/20231221183540.168428-2-andrey.konovalov@linux.dev Fixes: 5d4c6ac94694 ("kasan: record and report more information") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 7 +------ mm/kasan/report_tags.c | 12 ++++++------ mm/kasan/tags.c | 12 ++++++------ 3 files changed, 13 insertions(+), 18 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index acc1a9410f0d..a280bb04c0e6 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -293,13 +293,8 @@ struct kasan_free_meta { struct kasan_stack_ring_entry { void *ptr; size_t size; - u32 pid; - depot_stack_handle_t stack; + struct kasan_track track; bool is_free; -#ifdef CONFIG_KASAN_EXTRA_INFO - u64 cpu:20; - u64 timestamp:44; -#endif /* CONFIG_KASAN_EXTRA_INFO */ }; struct kasan_stack_ring { diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 979f284c2497..688b9d70b04a 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -31,8 +31,8 @@ static const char *get_common_bug_type(struct kasan_report_info *info) static void kasan_complete_extra_report_info(struct kasan_track *track, struct kasan_stack_ring_entry *entry) { - track->cpu = entry->cpu; - track->timestamp = entry->timestamp; + track->cpu = entry->track.cpu; + track->timestamp = entry->track.timestamp; } #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -80,8 +80,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (free_found) break; - info->free_track.pid = entry->pid; - info->free_track.stack = entry->stack; + info->free_track.pid = entry->track.pid; + info->free_track.stack = entry->track.stack; #ifdef CONFIG_KASAN_EXTRA_INFO kasan_complete_extra_report_info(&info->free_track, entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -98,8 +98,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (alloc_found) break; - info->alloc_track.pid = entry->pid; - info->alloc_track.stack = entry->stack; + info->alloc_track.pid = entry->track.pid; + info->alloc_track.stack = entry->track.stack; #ifdef CONFIG_KASAN_EXTRA_INFO kasan_complete_extra_report_info(&info->alloc_track, entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index c13b198b8302..c4d14dbf27c0 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -100,8 +100,8 @@ static void save_extra_info(struct kasan_stack_ring_entry *entry) u32 cpu = raw_smp_processor_id(); u64 ts_nsec = local_clock(); - entry->cpu = cpu; - entry->timestamp = ts_nsec >> 3; + entry->track.cpu = cpu; + entry->track.timestamp = ts_nsec >> 3; } #endif /* CONFIG_KASAN_EXTRA_INFO */ @@ -134,15 +134,15 @@ next: if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR)) goto next; /* Busy slot. */ - old_stack = entry->stack; + old_stack = entry->track.stack; entry->size = cache->object_size; - entry->pid = current->pid; - entry->stack = stack; - entry->is_free = is_free; + entry->track.pid = current->pid; + entry->track.stack = stack; #ifdef CONFIG_KASAN_EXTRA_INFO save_extra_info(entry); #endif /* CONFIG_KASAN_EXTRA_INFO */ + entry->is_free = is_free; entry->ptr = object; -- cgit v1.2.3 From fd4064f69708bc84d960b666896715fe86882c85 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:39 +0100 Subject: kasan: simplify saving extra info into tracks Avoid duplicating code for saving extra info into tracks: reuse the common function for this. 
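The shape of the resulting helpers, sketched as standalone C (pid and stack-depot plumbing stubbed out; the real functions take gfp and depot flags): kasan_set_track fills a track from a stack handle captured elsewhere, and kasan_save_track captures the stack itself and then delegates.

#include <stdint.h>

typedef uint32_t depot_stack_handle_t;	/* stand-in typedef */
struct kasan_track { int pid; depot_stack_handle_t stack; };

/* Stubs for pieces that live elsewhere in the kernel. */
static int current_pid(void) { return 0; }
static depot_stack_handle_t save_current_stack(void) { return 0; }

/*
 * Fill a track from an already-captured stack handle; the tag-based stack
 * ring code captures the handle itself and calls this directly.
 */
static void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
	track->pid = current_pid();
	track->stack = stack;
	/* With CONFIG_KASAN_EXTRA_INFO, cpu and timestamp are filled here too. */
}

/* Capture a stack trace, then reuse the common helper above. */
void kasan_save_track(struct kasan_track *track)
{
	kasan_set_track(track, save_current_stack());
}
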
Link: https://lkml.kernel.org/r/20231221183540.168428-3-andrey.konovalov@linux.dev Fixes: 5d4c6ac94694 ("kasan: record and report more information") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/common.c | 12 ++++++++++-- mm/kasan/generic.c | 4 ++-- mm/kasan/kasan.h | 3 ++- mm/kasan/tags.c | 17 +---------------- 4 files changed, 15 insertions(+), 21 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 86adf80cc11a..a486e9b1ac68 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -48,7 +48,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags) return stack_depot_save_flags(entries, nr_entries, flags, depot_flags); } -void kasan_set_track(struct kasan_track *track, gfp_t flags) +void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack) { #ifdef CONFIG_KASAN_EXTRA_INFO u32 cpu = raw_smp_processor_id(); @@ -58,8 +58,16 @@ void kasan_set_track(struct kasan_track *track, gfp_t flags) track->timestamp = ts_nsec >> 3; #endif /* CONFIG_KASAN_EXTRA_INFO */ track->pid = current->pid; - track->stack = kasan_save_stack(flags, + track->stack = stack; +} + +void kasan_save_track(struct kasan_track *track, gfp_t flags) +{ + depot_stack_handle_t stack; + + stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET); + kasan_set_track(track, stack); } #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 5b3308127ce0..0e77c43c559e 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -579,7 +579,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) stack_depot_put(alloc_meta->aux_stack[1]); __memset(alloc_meta, 0, sizeof(*alloc_meta)); - kasan_set_track(&alloc_meta->alloc_track, flags); + kasan_save_track(&alloc_meta->alloc_track, flags); } void kasan_save_free_info(struct kmem_cache *cache, void *object) @@ -590,7 +590,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; - kasan_set_track(&free_meta->free_track, 0); + kasan_save_track(&free_meta->free_track, 0); /* The object was freed and has free track set. 
*/ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index a280bb04c0e6..814e89523c64 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -392,7 +392,8 @@ static inline void kasan_init_object_meta(struct kmem_cache *cache, const void * #endif depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); -void kasan_set_track(struct kasan_track *track, gfp_t flags); +void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack); +void kasan_save_track(struct kasan_track *track, gfp_t flags); void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags); void kasan_save_free_info(struct kmem_cache *cache, void *object); diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index c4d14dbf27c0..d65d48b85f90 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -94,17 +94,6 @@ void __init kasan_init_tags(void) } } -#ifdef CONFIG_KASAN_EXTRA_INFO -static void save_extra_info(struct kasan_stack_ring_entry *entry) -{ - u32 cpu = raw_smp_processor_id(); - u64 ts_nsec = local_clock(); - - entry->track.cpu = cpu; - entry->track.timestamp = ts_nsec >> 3; -} -#endif /* CONFIG_KASAN_EXTRA_INFO */ - static void save_stack_info(struct kmem_cache *cache, void *object, gfp_t gfp_flags, bool is_free) { @@ -137,11 +126,7 @@ next: old_stack = entry->track.stack; entry->size = cache->object_size; - entry->track.pid = current->pid; - entry->track.stack = stack; -#ifdef CONFIG_KASAN_EXTRA_INFO - save_extra_info(entry); -#endif /* CONFIG_KASAN_EXTRA_INFO */ + kasan_set_track(&entry->track, stack); entry->is_free = is_free; entry->ptr = object; -- cgit v1.2.3 From a3fbe303ec9db45dbabc923ea9c5323900176079 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 21 Dec 2023 19:35:40 +0100 Subject: kasan: simplify kasan_complete_mode_report_info for tag-based modes memcpy the alloc/free tracks when collecting the information about a bad access instead of copying fields one by one. 
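Because both the report info and the ring entry now hold a struct kasan_track (see the previous patch), the per-field copies collapse into one memcpy that also covers the CONFIG_KASAN_EXTRA_INFO fields without any ifdefs. A minimal sketch with trimmed-down structures follows; the helper name and layouts here are made up for illustration.

#include <stdint.h>
#include <string.h>

typedef uint32_t depot_stack_handle_t;	/* stand-in typedef */
struct kasan_track { int pid; depot_stack_handle_t stack; };

struct ring_entry_sketch { void *ptr; struct kasan_track track; };
struct report_info_sketch { struct kasan_track alloc_track, free_track; };

static void fill_free_track(struct report_info_sketch *info,
			    const struct ring_entry_sketch *entry)
{
	/* One memcpy instead of copying pid, stack, cpu and timestamp by hand. */
	memcpy(&info->free_track, &entry->track, sizeof(info->free_track));
}
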
Link: https://lkml.kernel.org/r/20231221183540.168428-4-andrey.konovalov@linux.dev Fixes: 5d4c6ac94694 ("kasan: record and report more information") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Juntong Deng Signed-off-by: Andrew Morton --- mm/kasan/report_tags.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c index 688b9d70b04a..d15f8f580e2c 100644 --- a/mm/kasan/report_tags.c +++ b/mm/kasan/report_tags.c @@ -27,15 +27,6 @@ static const char *get_common_bug_type(struct kasan_report_info *info) return "invalid-access"; } -#ifdef CONFIG_KASAN_EXTRA_INFO -static void kasan_complete_extra_report_info(struct kasan_track *track, - struct kasan_stack_ring_entry *entry) -{ - track->cpu = entry->track.cpu; - track->timestamp = entry->track.timestamp; -} -#endif /* CONFIG_KASAN_EXTRA_INFO */ - void kasan_complete_mode_report_info(struct kasan_report_info *info) { unsigned long flags; @@ -80,11 +71,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (free_found) break; - info->free_track.pid = entry->track.pid; - info->free_track.stack = entry->track.stack; -#ifdef CONFIG_KASAN_EXTRA_INFO - kasan_complete_extra_report_info(&info->free_track, entry); -#endif /* CONFIG_KASAN_EXTRA_INFO */ + memcpy(&info->free_track, &entry->track, + sizeof(info->free_track)); free_found = true; /* @@ -98,11 +86,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) if (alloc_found) break; - info->alloc_track.pid = entry->track.pid; - info->alloc_track.stack = entry->track.stack; -#ifdef CONFIG_KASAN_EXTRA_INFO - kasan_complete_extra_report_info(&info->alloc_track, entry); -#endif /* CONFIG_KASAN_EXTRA_INFO */ + memcpy(&info->alloc_track, &entry->track, + sizeof(info->alloc_track)); alloc_found = true; /* -- cgit v1.2.3 From 63b85ac56a6498476fb34402c10a3f431f62f35c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 26 Dec 2023 23:51:21 +0100 Subject: kasan: stop leaking stack trace handles Commit 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") added support for stack trace eviction for Generic KASAN. However, that commit didn't evict stack traces when the object is not put into quarantine. As a result, some stack traces are never evicted from the stack depot. In addition, with the "kasan: save mempool stack traces" series, the free stack traces for mempool objects are also not properly evicted from the stack depot. Fix both issues by: 1. Evicting all stack traces when an object is freed if it was not put into quarantine; 2. Always evicting an existing free stack trace when a new one is saved. Also do a few related clean-ups: - Do not zero out free track when initializing/invalidating free meta: set a value in shadow memory instead; - Rename KASAN_SLAB_FREETRACK to KASAN_SLAB_FREE_META; - Drop the kasan_init_cache_meta function as it's not used by KASAN; - Add comments for the kasan_alloc_meta and kasan_free_meta structs.
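In condensed form, the reworked free path in the mm/kasan/common.c hunk below behaves as sketched here. Signatures are simplified and the helpers are trivial stubs; a true return value means slab must not put the object back on the freelist yet.

#include <stdbool.h>

struct kmem_cache;

/* Trivial stubs for the existing KASAN helpers used on the free path. */
static bool is_kfence_address(const void *obj) { (void)obj; return false; }
static bool poison_slab_object(struct kmem_cache *c, void *obj) { (void)c; (void)obj; return false; }
static bool kasan_quarantine_put(struct kmem_cache *c, void *obj) { (void)c; (void)obj; return true; }
static void kasan_release_object_meta(struct kmem_cache *c, const void *obj) { (void)c; (void)obj; }

bool kasan_slab_free_sketch(struct kmem_cache *cache, void *object)
{
	if (is_kfence_address(object))
		return false;			/* KFENCE manages this object itself */
	if (poison_slab_object(cache, object))
		return true;			/* buggy free: never reallocate, keep metadata */
	if (kasan_quarantine_put(cache, object))
		return true;			/* metadata released when quarantine evicts it */
	kasan_release_object_meta(cache, object);	/* evict stack handles right away */
	return false;				/* object may be reused by slab */
}
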
[akpm@linux-foundation.org: make release_free_meta() and release_alloc_meta() static] Link: https://lkml.kernel.org/r/20231226225121.235865-1-andrey.konovalov@linux.dev Fixes: 773688a6cb24 ("kasan: use stack_depot_put for Generic mode") Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Signed-off-by: Andrew Morton --- mm/kasan/common.c | 27 +++++++++++++++++---- mm/kasan/generic.c | 60 ++++++++++++++++++++++++++++++++++++++++------- mm/kasan/kasan.h | 25 +++++++++++++++----- mm/kasan/quarantine.c | 20 +--------------- mm/kasan/report_generic.c | 6 ++--- 5 files changed, 97 insertions(+), 41 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/common.c b/mm/kasan/common.c index a486e9b1ac68..223af53d4338 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -255,14 +255,33 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object, bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip, bool init) { - bool buggy_object; - if (is_kfence_address(object)) return false; - buggy_object = poison_slab_object(cache, object, ip, init); + /* + * If the object is buggy, do not let slab put the object onto the + * freelist. The object will thus never be allocated again and its + * metadata will never get released. + */ + if (poison_slab_object(cache, object, ip, init)) + return true; + + /* + * If the object is put into quarantine, do not let slab put the object + * onto the freelist for now. The object's metadata is kept until the + * object gets evicted from quarantine. + */ + if (kasan_quarantine_put(cache, object)) + return true; + + /* + * If the object is not put into quarantine, it will likely be quickly + * reallocated. Thus, release its metadata now. + */ + kasan_release_object_meta(cache, object); - return buggy_object ? true : kasan_quarantine_put(cache, object); + /* Let slab put the object onto the freelist. */ + return false; } static inline bool check_page_allocation(void *ptr, unsigned long ip) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 0e77c43c559e..24c13dfb1e94 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -480,10 +480,10 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { struct kasan_alloc_meta *alloc_meta; - struct kasan_free_meta *free_meta; alloc_meta = kasan_get_alloc_meta(cache, object); if (alloc_meta) { + /* Zero out alloc meta to mark it as invalid. */ __memset(alloc_meta, 0, sizeof(*alloc_meta)); /* @@ -495,9 +495,50 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object) raw_spin_lock_init(&alloc_meta->aux_lock); kasan_enable_current(); } + + /* + * Explicitly marking free meta as invalid is not required: the shadow + * value for the first 8 bytes of a newly allocated object is not + * KASAN_SLAB_FREE_META. + */ +} + +static void release_alloc_meta(struct kasan_alloc_meta *meta) +{ + /* Evict the stack traces from stack depot. */ + stack_depot_put(meta->alloc_track.stack); + stack_depot_put(meta->aux_stack[0]); + stack_depot_put(meta->aux_stack[1]); + + /* Zero out alloc meta to mark it as invalid. */ + __memset(meta, 0, sizeof(*meta)); +} + +static void release_free_meta(const void *object, struct kasan_free_meta *meta) +{ + /* Check if free meta is valid. */ + if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META) + return; + + /* Evict the stack trace from the stack depot. 
*/ + stack_depot_put(meta->free_track.stack); + + /* Mark free meta as invalid. */ + *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; +} + +void kasan_release_object_meta(struct kmem_cache *cache, const void *object) +{ + struct kasan_alloc_meta *alloc_meta; + struct kasan_free_meta *free_meta; + + alloc_meta = kasan_get_alloc_meta(cache, object); + if (alloc_meta) + release_alloc_meta(alloc_meta); + free_meta = kasan_get_free_meta(cache, object); if (free_meta) - __memset(free_meta, 0, sizeof(*free_meta)); + release_free_meta(object, free_meta); } size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) @@ -573,11 +614,8 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) if (!alloc_meta) return; - /* Evict previous stack traces (might exist for krealloc). */ - stack_depot_put(alloc_meta->alloc_track.stack); - stack_depot_put(alloc_meta->aux_stack[0]); - stack_depot_put(alloc_meta->aux_stack[1]); - __memset(alloc_meta, 0, sizeof(*alloc_meta)); + /* Evict previous stack traces (might exist for krealloc or mempool). */ + release_alloc_meta(alloc_meta); kasan_save_track(&alloc_meta->alloc_track, flags); } @@ -590,7 +628,11 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; + /* Evict previous stack trace (might exist for mempool). */ + release_free_meta(object, free_meta); + kasan_save_track(&free_meta->free_track, 0); - /* The object was freed and has free track set. */ - *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; + + /* Mark free meta as valid. */ + *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META; } diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 814e89523c64..645ae04539c9 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -156,7 +156,7 @@ static inline bool kasan_requires_meta(void) #ifdef CONFIG_KASAN_GENERIC -#define KASAN_SLAB_FREETRACK 0xFA /* freed slab object with free track */ +#define KASAN_SLAB_FREE_META 0xFA /* freed slab object with free meta */ #define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */ /* Stack redzone shadow values. Compiler ABI, do not change. */ @@ -253,6 +253,15 @@ struct kasan_global { #ifdef CONFIG_KASAN_GENERIC +/* + * Alloc meta contains the allocation-related information about a slab object. + * Alloc meta is saved when an object is allocated and is kept until either the + * object returns to the slab freelist (leaves quarantine for quarantined + * objects or gets freed for the non-quarantined ones) or reallocated via + * krealloc or through a mempool. + * Alloc meta is stored inside of the object's redzone. + * Alloc meta is considered valid whenever it contains non-zero data. + */ struct kasan_alloc_meta { struct kasan_track alloc_track; /* Free track is stored in kasan_free_meta. */ @@ -278,8 +287,12 @@ struct qlist_node { #define KASAN_NO_FREE_META INT_MAX /* - * Free meta is only used by Generic mode while the object is in quarantine. - * After that, slab allocator stores the freelist pointer in the object. + * Free meta contains the freeing-related information about a slab object. + * Free meta is only kept for quarantined objects and for mempool objects until + * the object gets allocated again. + * Free meta is stored within the object's memory. + * Free meta is considered valid whenever the value of the shadow byte that + * corresponds to the first 8 bytes of the object is KASAN_SLAB_FREE_META. 
*/ struct kasan_free_meta { struct qlist_node quarantine_link; @@ -380,15 +393,15 @@ void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report struct slab *kasan_addr_to_slab(const void *addr); #ifdef CONFIG_KASAN_GENERIC -void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size); -void kasan_init_object_meta(struct kmem_cache *cache, const void *object); struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache, const void *object); struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache, const void *object); +void kasan_init_object_meta(struct kmem_cache *cache, const void *object); +void kasan_release_object_meta(struct kmem_cache *cache, const void *object); #else -static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { } static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { } +static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { } #endif depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags); diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 782e045da911..8afa77bc5d3b 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -143,22 +143,10 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache) static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) { void *object = qlink_to_object(qlink, cache); - struct kasan_alloc_meta *alloc_meta = kasan_get_alloc_meta(cache, object); struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object); unsigned long flags; - if (alloc_meta) { - stack_depot_put(alloc_meta->alloc_track.stack); - stack_depot_put(alloc_meta->aux_stack[0]); - stack_depot_put(alloc_meta->aux_stack[1]); - __memset(alloc_meta, 0, sizeof(*alloc_meta)); - } - - if (free_meta && - *(u8 *)kasan_mem_to_shadow(object) == KASAN_SLAB_FREETRACK) { - stack_depot_put(free_meta->free_track.stack); - __memset(&free_meta->free_track, 0, sizeof(free_meta->free_track)); - } + kasan_release_object_meta(cache, object); /* * If init_on_free is enabled and KASAN's free metadata is stored in @@ -170,12 +158,6 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) cache->kasan_info.free_meta_offset == 0) memzero_explicit(free_meta, sizeof(*free_meta)); - /* - * As the object now gets freed from the quarantine, - * take note that its free track is no longer exists. - */ - *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE; - if (IS_ENABLED(CONFIG_SLAB)) local_irq_save(flags); diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c index 99cbcd73cff7..f5b8e37b3805 100644 --- a/mm/kasan/report_generic.c +++ b/mm/kasan/report_generic.c @@ -110,7 +110,7 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info) bug_type = "use-after-free"; break; case KASAN_SLAB_FREE: - case KASAN_SLAB_FREETRACK: + case KASAN_SLAB_FREE_META: bug_type = "slab-use-after-free"; break; case KASAN_ALLOCA_LEFT: @@ -173,8 +173,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info) memcpy(&info->alloc_track, &alloc_meta->alloc_track, sizeof(info->alloc_track)); - if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREETRACK) { - /* Free meta must be present with KASAN_SLAB_FREETRACK. */ + if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREE_META) { + /* Free meta must be present with KASAN_SLAB_FREE_META. 
*/ free_meta = kasan_get_free_meta(info->cache, info->object); memcpy(&info->free_track, &free_meta->free_track, sizeof(info->free_track)); -- cgit v1.2.3