author     Joel Fernandes (Google) <joel@joelfernandes.org>  2023-09-04 18:08:04 +0000
committer  Andrew Morton <akpm@linux-foundation.org>         2023-09-05 10:13:45 -0700
commit     0818e739b5c061b0251c30152380600fb9b84c0c (patch)
tree       e46a53aed8e55375d0fe8b04491a89966b57b765 /mm/vmalloc.c
parent     7f33105cdd59a99d068d3d147723a865d10e2260 (diff)
mm/vmalloc: add a safer version of find_vm_area() for debug
It is unsafe to dump vmalloc area information when trying to do so from
some contexts. Add a safer trylock version of the same function to do a
best-effort VMA finding and use it from vmalloc_dump_obj().

[applied test robot feedback on unused function fix.]
[applied Uladzislau feedback on locking.]

Link: https://lkml.kernel.org/r/20230904180806.1002832-1-joel@joelfernandes.org
Fixes: 98f180837a89 ("mm: Make mem_dump_obj() handle vmalloc() memory")
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reported-by: Zhen Lei <thunder.leizhen@huaweicloud.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Zqiang <qiang.zhang1211@gmail.com>
Cc: <stable@vger.kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 228a4a5312f2..ef8599d394fd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-	struct vm_struct *vm;
 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+	const void *caller;
+	struct vm_struct *vm;
+	struct vmap_area *va;
+	unsigned long addr;
+	unsigned int nr_pages;
 
-	vm = find_vm_area(objp);
-	if (!vm)
+	if (!spin_trylock(&vmap_area_lock))
+		return false;
+	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+	if (!va) {
+		spin_unlock(&vmap_area_lock);
 		return false;
+	}
+
+	vm = va->vm;
+	if (!vm) {
+		spin_unlock(&vmap_area_lock);
+		return false;
+	}
+	addr = (unsigned long)vm->addr;
+	caller = vm->caller;
+	nr_pages = vm->nr_pages;
+	spin_unlock(&vmap_area_lock);
 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+		nr_pages, addr, caller);
 	return true;
 }
 #endif
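
For illustration only, the best-effort pattern the patch relies on can be sketched in
plain userspace C: try the lock, bail out if it cannot be taken, snapshot the fields of
interest while the lock is held, and print only after unlocking. Every name in the
sketch (struct region, region_lock, find_region, dump_region) is a hypothetical
stand-in, not a kernel API; the kernel code above does the same thing with
spin_trylock()/spin_unlock() on vmap_area_lock.

/*
 * Minimal userspace sketch (hypothetical names, not kernel APIs) of the
 * best-effort dump pattern used by the patch above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct region {
	unsigned long addr;
	unsigned int nr_pages;
	const char *caller;
};

static pthread_spinlock_t region_lock;

static struct region regions[] = {
	{ 0x7f0000000000UL, 4, "some_alloc_site+0x10" },
};

/* Lookup that is only valid while region_lock is held. */
static struct region *find_region(unsigned long addr)
{
	size_t i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		if (regions[i].addr == addr)
			return &regions[i];
	return NULL;
}

static bool dump_region(unsigned long addr)
{
	struct region snap, *r;

	/* Best effort: give up instead of risking a deadlock. */
	if (pthread_spin_trylock(&region_lock) != 0)
		return false;

	r = find_region(addr);
	if (!r) {
		pthread_spin_unlock(&region_lock);
		return false;
	}

	/* Copy the fields while the lock still protects *r ... */
	snap = *r;
	pthread_spin_unlock(&region_lock);

	/* ... and only print after the lock has been dropped. */
	printf(" %u-page region starting at %#lx allocated at %s\n",
	       snap.nr_pages, snap.addr, snap.caller);
	return true;
}

int main(void)
{
	pthread_spin_init(&region_lock, PTHREAD_PROCESS_PRIVATE);
	dump_region(0x7f0000000000UL);
	pthread_spin_destroy(&region_lock);
	return 0;
}

Build with -lpthread. dump_region() returns false both when the lock is contended and
when no region matches, mirroring the best-effort return value of vmalloc_dump_obj().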