author	Daniel Axtens <dja@axtens.net>	2019-12-17 20:51:49 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-12-17 20:59:59 -0800
commit	253a496d8e57275d458eb3c988470525b0b2c545 (patch)
tree	13cb1452959d971018c4db8d7d0a7659884d7c2e /mm/vmalloc.c
parent	e218f1ca3971e5bcaae1fe8e6f007f9a206e32e9 (diff)
kasan: don't assume percpu shadow allocations will succeed
syzkaller and the fault injector showed that I was wrong to assume that we
could ignore percpu shadow allocation failures.  Handle failures properly.

Merge all the allocated areas back into the free list and release the
shadow, then clean up and return NULL.  The shadow is released
unconditionally, which relies upon the fact that the release function is
able to tolerate pages not being present.

Also clean up shadows in the recovery path - currently they are not
released, which leaks a bit of memory.

Link: http://lkml.kernel.org/r/20191205140407.1874-3-dja@axtens.net
Fixes: 3c5c3cfb9ef4 ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Daniel Axtens <dja@axtens.net>
Reported-by: syzbot+82e323920b78d54aaed5@syzkaller.appspotmail.com
Reported-by: syzbot+59b7daa4315e07a994f1@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
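In short, the shadow is now populated before the vmap areas are published, and a new err_free_shadow error path unwinds a partial allocation. Below is a condensed sketch of that path, taken from the diff further down with explanatory comments added; it is an illustration of the flow in pcpu_get_vm_areas(), not a standalone compilable unit.

	/* Populate the KASAN shadow for each area before publishing it. */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;	/* shadow page allocation failed */

		kasan_unpoison_vmalloc((void *)vas[area]->va_start, sizes[area]);
	}
	...

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		/* Remember the extent before the area is merged back. */
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;

		/* Return the address range to the free list ... */
		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
					    &free_vmap_area_list);

		/*
		 * ... and drop any shadow backing it.  This runs for every
		 * area, even those whose shadow was never populated, and so
		 * relies on kasan_release_vmalloc() tolerating shadow pages
		 * that are not present.
		 */
		kasan_release_vmalloc(orig_start, orig_end,
				      va->va_start, va->va_end);

		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;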
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	48
1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6e865cea846c..e9681dc4aa75 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3288,7 +3288,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
struct vmap_area **vas, *va;
struct vm_struct **vms;
int area, area2, last_area, term_area;
- unsigned long base, start, size, end, last_end;
+ unsigned long base, start, size, end, last_end, orig_start, orig_end;
bool purged = false;
enum fit_type type;
@@ -3418,6 +3418,15 @@ retry:
spin_unlock(&free_vmap_area_lock);
+ /* populate the kasan shadow space */
+ for (area = 0; area < nr_vms; area++) {
+ if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+ goto err_free_shadow;
+
+ kasan_unpoison_vmalloc((void *)vas[area]->va_start,
+ sizes[area]);
+ }
+
/* insert all vm's */
spin_lock(&vmap_area_lock);
for (area = 0; area < nr_vms; area++) {
@@ -3428,13 +3437,6 @@ retry:
}
spin_unlock(&vmap_area_lock);
- /* populate the shadow space outside of the lock */
- for (area = 0; area < nr_vms; area++) {
- /* assume success here */
- kasan_populate_vmalloc(vas[area]->va_start, sizes[area]);
- kasan_unpoison_vmalloc((void *)vms[area]->addr, sizes[area]);
- }
-
kfree(vas);
return vms;
@@ -3446,8 +3448,12 @@ recovery:
* and when pcpu_get_vm_areas() is success.
*/
while (area--) {
- merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
- &free_vmap_area_list);
+ orig_start = vas[area]->va_start;
+ orig_end = vas[area]->va_end;
+ va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+ &free_vmap_area_list);
+ kasan_release_vmalloc(orig_start, orig_end,
+ va->va_start, va->va_end);
vas[area] = NULL;
}
@@ -3482,6 +3488,28 @@ err_free2:
kfree(vas);
kfree(vms);
return NULL;
+
+err_free_shadow:
+ spin_lock(&free_vmap_area_lock);
+ /*
+ * We release all the vmalloc shadows, even the ones for regions that
+ * hadn't been successfully added. This relies on kasan_release_vmalloc
+ * being able to tolerate this case.
+ */
+ for (area = 0; area < nr_vms; area++) {
+ orig_start = vas[area]->va_start;
+ orig_end = vas[area]->va_end;
+ va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+ &free_vmap_area_list);
+ kasan_release_vmalloc(orig_start, orig_end,
+ va->va_start, va->va_end);
+ vas[area] = NULL;
+ kfree(vms[area]);
+ }
+ spin_unlock(&free_vmap_area_lock);
+ kfree(vas);
+ kfree(vms);
+ return NULL;
}
/**