author     Kent Overstreet <kent.overstreet@gmail.com>    2019-02-11 17:04:33 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>    2019-02-11 18:30:37 -0500
commit     4fb75705334badee5a02dfef9a20e1bfb1867008 (patch)
tree       c21caaa8d167de2d84be3c58f53a271390a66785 /mm
parent     826f68524ed2caa514dc0af1a052d3c0b46ebc9d (diff)
Propagate gfp_t when allocating pte entries from __vmalloc
This fixes a lockdep recursion when using __vmalloc from places that
aren't GFP_KERNEL safe.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
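With this change, the gfp mask passed to __vmalloc() is forwarded through map_vm_area() and the vmap_*_range() helpers down to the page-table allocators (p4d/pud/pmd/pte), instead of those allocations being hardcoded to GFP_KERNEL. A minimal caller sketch, not part of the patch, assuming the three-argument __vmalloc() signature of this kernel era and a hypothetical nbytes size:

	/*
	 * Illustrative only: a caller on a filesystem/reclaim path that is
	 * not GFP_KERNEL safe.  After this patch the GFP_NOFS mask also
	 * governs the pmd/pte allocations made while the vmalloc area is
	 * mapped, avoiding the lockdep recursion described above.
	 */
	void *buf = __vmalloc(nbytes, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
	if (!buf)
		return -ENOMEM;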
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c            11
-rw-r--r--  mm/kasan/kasan_init.c    9
-rw-r--r--  mm/memory.c             51
-rw-r--r--  mm/migrate.c             6
-rw-r--r--  mm/mremap.c              6
-rw-r--r--  mm/userfaultfd.c         6
-rw-r--r--  mm/vmalloc.c            49
-rw-r--r--  mm/zsmalloc.c            2
8 files changed, 76 insertions, 64 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a80832487981..dce41e3f449b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4658,7 +4658,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
spinlock_t *ptl;
if (!vma_shareable(vma, addr))
- return (pte_t *)pmd_alloc(mm, pud, addr);
+ return (pte_t *)pmd_alloc(mm, pud, addr, GFP_KERNEL);
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
@@ -4689,7 +4689,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
}
spin_unlock(ptl);
out:
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ pte = (pte_t *)pmd_alloc(mm, pud, addr, GFP_KERNEL);
i_mmap_unlock_write(mapping);
return pte;
}
@@ -4751,10 +4751,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr, GFP_KERNEL);
if (!p4d)
return NULL;
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc(mm, p4d, addr, GFP_KERNEL);
if (pud) {
if (sz == PUD_SIZE) {
pte = (pte_t *)pud;
@@ -4763,7 +4763,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
if (want_pmd_share() && pud_none(*pud))
pte = huge_pmd_share(mm, addr, pud);
else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ pte = (pte_t *)pmd_alloc(mm, pud, addr,
+ GFP_KERNEL);
}
}
BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index c7550eb65922..5579bba63ead 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -120,7 +120,8 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
pte_t *p;
if (slab_is_available())
- p = pte_alloc_one_kernel(&init_mm, addr);
+ p = pte_alloc_one_kernel(&init_mm, addr,
+ GFP_KERNEL);
else
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
if (!p)
@@ -155,7 +156,7 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
pmd_t *p;
if (slab_is_available()) {
- p = pmd_alloc(&init_mm, pud, addr);
+ p = pmd_alloc(&init_mm, pud, addr, GFP_KERNEL);
if (!p)
return -ENOMEM;
} else {
@@ -194,7 +195,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pud_t *p;
if (slab_is_available()) {
- p = pud_alloc(&init_mm, p4d, addr);
+ p = pud_alloc(&init_mm, p4d, addr, GFP_KERNEL);
if (!p)
return -ENOMEM;
} else {
@@ -263,7 +264,7 @@ int __ref kasan_populate_zero_shadow(const void *shadow_start,
p4d_t *p;
if (slab_is_available()) {
- p = p4d_alloc(&init_mm, pgd, addr);
+ p = p4d_alloc(&init_mm, pgd, addr, GFP_KERNEL);
if (!p)
return -ENOMEM;
} else {
diff --git a/mm/memory.c b/mm/memory.c
index 4ad2d293ddc2..3be7c7c0379f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
return 0;
}
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address, gfp_t gfp)
{
- pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+ pte_t *new = pte_alloc_one_kernel(&init_mm, address, gfp);
if (!new)
return -ENOMEM;
@@ -883,7 +883,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
pmd_t *src_pmd, *dst_pmd;
unsigned long next;
- dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
+ dst_pmd = pmd_alloc(dst_mm, dst_pud, addr, GFP_KERNEL);
if (!dst_pmd)
return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
@@ -917,7 +917,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
pud_t *src_pud, *dst_pud;
unsigned long next;
- dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
+ dst_pud = pud_alloc(dst_mm, dst_p4d, addr, GFP_KERNEL);
if (!dst_pud)
return -ENOMEM;
src_pud = pud_offset(src_p4d, addr);
@@ -951,7 +951,7 @@ static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src
p4d_t *src_p4d, *dst_p4d;
unsigned long next;
- dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
+ dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr, GFP_KERNEL);
if (!dst_p4d)
return -ENOMEM;
src_p4d = p4d_offset(src_pgd, addr);
@@ -1421,13 +1421,13 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr, GFP_KERNEL);
if (!p4d)
return NULL;
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc(mm, p4d, addr, GFP_KERNEL);
if (!pud)
return NULL;
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc(mm, pud, addr, GFP_KERNEL);
if (!pmd)
return NULL;
@@ -1764,7 +1764,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
int err;
pfn -= addr >> PAGE_SHIFT;
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc(mm, pud, addr, GFP_KERNEL);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
@@ -1787,7 +1787,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
int err;
pfn -= addr >> PAGE_SHIFT;
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc(mm, p4d, addr, GFP_KERNEL);
if (!pud)
return -ENOMEM;
do {
@@ -1809,7 +1809,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
int err;
pfn -= addr >> PAGE_SHIFT;
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr, GFP_KERNEL);
if (!p4d)
return -ENOMEM;
do {
@@ -1948,7 +1948,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
+ pte_alloc_kernel(pmd, addr, GFP_KERNEL) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
@@ -1982,7 +1982,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc(mm, pud, addr, GFP_KERNEL);
if (!pmd)
return -ENOMEM;
do {
@@ -2002,7 +2002,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long next;
int err;
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc(mm, p4d, addr, GFP_KERNEL);
if (!pud)
return -ENOMEM;
do {
@@ -2022,7 +2022,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr, GFP_KERNEL);
if (!p4d)
return -ENOMEM;
do {
@@ -3823,11 +3823,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
vm_fault_t ret;
pgd = pgd_offset(mm, address);
- p4d = p4d_alloc(mm, pgd, address);
+ p4d = p4d_alloc(mm, pgd, address, GFP_KERNEL);
if (!p4d)
return VM_FAULT_OOM;
- vmf.pud = pud_alloc(mm, p4d, address);
+ vmf.pud = pud_alloc(mm, p4d, address, GFP_KERNEL);
if (!vmf.pud)
return VM_FAULT_OOM;
if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
@@ -3853,7 +3853,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
}
}
- vmf.pmd = pmd_alloc(mm, vmf.pud, address);
+ vmf.pmd = pmd_alloc(mm, vmf.pud, address, GFP_KERNEL);
if (!vmf.pmd)
return VM_FAULT_OOM;
if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
@@ -3946,9 +3946,10 @@ EXPORT_SYMBOL_GPL(handle_mm_fault);
* Allocate p4d page table.
* We've already handled the fast-path in-line.
*/
-int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address,
+ gfp_t gfp)
{
- p4d_t *new = p4d_alloc_one(mm, address);
+ p4d_t *new = p4d_alloc_one(mm, address, gfp);
if (!new)
return -ENOMEM;
@@ -3969,9 +3970,10 @@ int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
-int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address,
+ gfp_t gfp)
{
- pud_t *new = pud_alloc_one(mm, address);
+ pud_t *new = pud_alloc_one(mm, address, gfp);
if (!new)
return -ENOMEM;
@@ -4001,10 +4003,11 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
* Allocate page middle directory.
* We've already handled the fast-path in-line.
*/
-int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address,
+ gfp_t gfp)
{
spinlock_t *ptl;
- pmd_t *new = pmd_alloc_one(mm, address);
+ pmd_t *new = pmd_alloc_one(mm, address, gfp);
if (!new)
return -ENOMEM;
diff --git a/mm/migrate.c b/mm/migrate.c
index f7e4bfdc13b7..3ad160ebf948 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2576,13 +2576,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto abort;
pgdp = pgd_offset(mm, addr);
- p4dp = p4d_alloc(mm, pgdp, addr);
+ p4dp = p4d_alloc(mm, pgdp, addr, GFP_KERNEL);
if (!p4dp)
goto abort;
- pudp = pud_alloc(mm, p4dp, addr);
+ pudp = pud_alloc(mm, p4dp, addr, GFP_KERNEL);
if (!pudp)
goto abort;
- pmdp = pmd_alloc(mm, pudp, addr);
+ pmdp = pmd_alloc(mm, pudp, addr, GFP_KERNEL);
if (!pmdp)
goto abort;
diff --git a/mm/mremap.c b/mm/mremap.c
index 7f9f9180e401..50dbd76874aa 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -65,14 +65,14 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr, GFP_KERNEL);
if (!p4d)
return NULL;
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc(mm, p4d, addr, GFP_KERNEL);
if (!pud)
return NULL;
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc(mm, pud, addr, GFP_KERNEL);
if (!pmd)
return NULL;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 458acda96f20..8279df1ca178 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -153,10 +153,10 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
pud_t *pud;
pgd = pgd_offset(mm, address);
- p4d = p4d_alloc(mm, pgd, address);
+ p4d = p4d_alloc(mm, pgd, address, GFP_KERNEL);
if (!p4d)
return NULL;
- pud = pud_alloc(mm, p4d, address);
+ pud = pud_alloc(mm, p4d, address, GFP_KERNEL);
if (!pud)
return NULL;
/*
@@ -164,7 +164,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
* missing, the *pmd may be already established and in
* turn it may also be a trans_huge_pmd.
*/
- return pmd_alloc(mm, pud, address);
+ return pmd_alloc(mm, pud, address, GFP_KERNEL);
}
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 97d4b25d0373..3b8777ae1901 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -132,7 +132,8 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ unsigned long end, gfp_t gfp, pgprot_t prot,
+ struct page **pages, int *nr)
{
pte_t *pte;
@@ -141,7 +142,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
* callers keep track of where we're up to.
*/
- pte = pte_alloc_kernel(pmd, addr);
+ pte = pte_alloc_kernel(pmd, addr, gfp);
if (!pte)
return -ENOMEM;
do {
@@ -158,51 +159,54 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ unsigned long end, gfp_t gfp, pgprot_t prot,
+ struct page **pages, int *nr)
{
pmd_t *pmd;
unsigned long next;
- pmd = pmd_alloc(&init_mm, pud, addr);
+ pmd = pmd_alloc(&init_mm, pud, addr, gfp);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
+ if (vmap_pte_range(pmd, addr, next, gfp, prot, pages, nr))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ unsigned long end, gfp_t gfp, pgprot_t prot,
+ struct page **pages, int *nr)
{
pud_t *pud;
unsigned long next;
- pud = pud_alloc(&init_mm, p4d, addr);
+ pud = pud_alloc(&init_mm, p4d, addr, gfp);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
+ if (vmap_pmd_range(pud, addr, next, gfp, prot, pages, nr))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr)
+ unsigned long end, gfp_t gfp, pgprot_t prot,
+ struct page **pages, int *nr)
{
p4d_t *p4d;
unsigned long next;
- p4d = p4d_alloc(&init_mm, pgd, addr);
+ p4d = p4d_alloc(&init_mm, pgd, addr, gfp);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
+ if (vmap_pud_range(p4d, addr, next, gfp, prot, pages, nr))
return -ENOMEM;
} while (p4d++, addr = next, addr != end);
return 0;
@@ -215,7 +219,8 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+ gfp_t gfp, pgprot_t prot,
+ struct page **pages)
{
pgd_t *pgd;
unsigned long next;
@@ -227,7 +232,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
+ err = vmap_p4d_range(pgd, addr, next, gfp, prot, pages, &nr);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
@@ -236,11 +241,11 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
}
static int vmap_page_range(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+ gfp_t gfp, pgprot_t prot, struct page **pages)
{
int ret;
- ret = vmap_page_range_noflush(start, end, prot, pages);
+ ret = vmap_page_range_noflush(start, end, gfp, prot, pages);
flush_cache_vmap(start, end);
return ret;
}
@@ -1178,7 +1183,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
addr = va->va_start;
mem = (void *)addr;
}
- if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+ if (vmap_page_range(addr, addr + size, GFP_KERNEL, prot, pages) < 0) {
vm_unmap_ram(mem, count);
return NULL;
}
@@ -1293,7 +1298,8 @@ void __init vmalloc_init(void)
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
pgprot_t prot, struct page **pages)
{
- return vmap_page_range_noflush(addr, addr + size, prot, pages);
+ return vmap_page_range_noflush(addr, addr + size, GFP_KERNEL, prot,
+ pages);
}
/**
@@ -1334,13 +1340,14 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
+int map_vm_area(struct vm_struct *area, gfp_t gfp,
+ pgprot_t prot, struct page **pages)
{
unsigned long addr = (unsigned long)area->addr;
unsigned long end = addr + get_vm_area_size(area);
int err;
- err = vmap_page_range(addr, end, prot, pages);
+ err = vmap_page_range(addr, end, gfp, prot, pages);
return err > 0 ? 0 : err;
}
@@ -1642,7 +1649,7 @@ void *vmap(struct page **pages, unsigned int count,
if (!area)
return NULL;
- if (map_vm_area(area, prot, pages)) {
+ if (map_vm_area(area, GFP_KERNEL, prot, pages)) {
vunmap(area->addr);
return NULL;
}
@@ -1701,7 +1708,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
cond_resched();
}
- if (map_vm_area(area, prot, pages))
+ if (map_vm_area(area, gfp_mask, prot, pages))
goto fail;
return area->addr;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0787d33b80d8..d369e5bf2711 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1151,7 +1151,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
static inline void *__zs_map_object(struct mapping_area *area,
struct page *pages[2], int off, int size)
{
- BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
+ BUG_ON(map_vm_area(area->vm, GFP_KERNEL, PAGE_KERNEL, pages));
area->vm_addr = area->vm->addr;
return area->vm_addr + off;
}