Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b2ec7f751bd0..d5cd52805149 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,6 +25,7 @@
 #include <linux/notifier.h>
 #include <linux/rbtree.h>
 #include <linux/xarray.h>
+#include <linux/io.h>
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
@@ -36,6 +37,7 @@
 #include <linux/overflow.h>
 #include <linux/pgtable.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
@@ -83,10 +85,11 @@ static void free_work(struct work_struct *w)
 /*** Page table manipulation functions ***/
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	pte_t *pte;
 	u64 pfn;
+	unsigned long size = PAGE_SIZE;
 
 	pfn = phys_addr >> PAGE_SHIFT;
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
@@ -94,9 +97,22 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		return -ENOMEM;
 	do {
 		BUG_ON(!pte_none(*pte));
+
+#ifdef CONFIG_HUGETLB_PAGE
+		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
+		if (size != PAGE_SIZE) {
+			pte_t entry = pfn_pte(pfn, prot);
+
+			entry = pte_mkhuge(entry);
+			entry = arch_make_huge_pte(entry, ilog2(size), 0);
+			set_huge_pte_at(&init_mm, addr, pte, entry);
+			pfn += PFN_DOWN(size);
+			continue;
+		}
+#endif
 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 		pfn++;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (pte += PFN_DOWN(size), addr += size, addr != end);
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
@@ -145,7 +161,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			continue;
 		}
 
-		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
+		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
 			return -ENOMEM;
 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
@@ -1592,6 +1608,7 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
+#ifdef CONFIG_X86_64
 /*
  * called before a call to iounmap() if the caller wants vm_area_struct's
  * immediately freed.
@@ -1600,6 +1617,7 @@ void set_iounmap_nonlazy(void)
 {
 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
 }
+#endif /* CONFIG_X86_64 */
 
 /*
  * Purges all lazily-freed vmap areas.
@@ -2912,8 +2930,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		return NULL;
 	}
 
-	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
-			arch_vmap_pmd_supported(prot)) {
+	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
 		unsigned long size_per_node;
 
 		/*
@@ -2926,11 +2943,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		size_per_node = size;
 		if (node == NUMA_NO_NODE)
 			size_per_node /= num_online_nodes();
-		if (size_per_node >= PMD_SIZE) {
+		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
 			shift = PMD_SHIFT;
-			align = max(real_align, 1UL << shift);
-			size = ALIGN(real_size, 1UL << shift);
-		}
+		else
+			shift = arch_vmap_pte_supported_shift(size_per_node);
+
+		align = max(real_align, 1UL << shift);
+		size = ALIGN(real_size, 1UL << shift);
 	}
 
 again:
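The two hooks the hunks above call, arch_vmap_pte_range_map_size() and
arch_vmap_pte_supported_shift(), only change behaviour when an architecture
opts in. A minimal sketch, assuming the generic fallbacks in
include/linux/vmalloc.h look roughly like the first two helpers below; the
512K override at the end is a hypothetical example of the opt-in pattern,
not any particular architecture's implementation:

/*
 * Generic fallbacks (sketch): with no arch override, vmap_pte_range()
 * keeps installing one PAGE_SIZE entry per iteration, and
 * __vmalloc_node_range() falls back to shift = PAGE_SHIFT.
 */
#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
			unsigned long end, u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

/*
 * Hypothetical arch override, defined in an arch header that is pulled in
 * before <linux/vmalloc.h>, so the #ifndef fallback above is skipped:
 * map 512K chunks whenever the remaining range, the virtual address and
 * the physical address are all 512K-aligned and the caller permits a
 * page shift that large.
 */
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
			unsigned long end, u64 pfn, unsigned int max_page_shift)
{
	if (max_page_shift < ilog2(SZ_512K) ||
	    end - addr < SZ_512K ||
	    !IS_ALIGNED(addr, SZ_512K) ||
	    !IS_ALIGNED(PFN_PHYS(pfn), SZ_512K))
		return PAGE_SIZE;

	return SZ_512K;
}

With an override like this, the do/while loop in vmap_pte_range() advances
pte by PFN_DOWN(size) and addr by size, so one set_huge_pte_at() call covers
128 base pages per iteration on a 4K-page configuration instead of 128
separate set_pte_at() calls.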