summary refs log tree commit diff
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  47
1 files changed, 31 insertions, 16 deletions
diff --git a/mm/memory.c b/mm/memory.c
index feff48e1465a..6bfa5bd33658 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -387,13 +387,18 @@ void free_pgd_range(struct mmu_gather *tlb,
} while (pgd++, addr = next, addr != end);
}
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling)
{
- while (vma) {
- struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *next;
+ struct ma_state ma_next = *mas;
+
+ do {
unsigned long addr = vma->vm_start;
+ next = mas_find(&ma_next, ceiling - 1);
+ BUG_ON(vma->vm_start < floor);
+ BUG_ON(vma->vm_end - 1 > ceiling - 1);
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
@@ -410,16 +415,20 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
+ *mas = ma_next;
vma = next;
- next = vma->vm_next;
+ next = mas_find(&ma_next, ceiling - 1);
+ BUG_ON(vma->vm_start < floor);
+ BUG_ON(vma->vm_end - 1 > ceiling - 1);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
floor, next ? next->vm_start : ceiling);
}
+ *mas = ma_next;
vma = next;
- }
+ } while (vma);
}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
@@ -1504,16 +1513,19 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start_addr,
- unsigned long end_addr)
+ struct vm_area_struct *vma, struct ma_state *mas,
+ unsigned long start_addr, unsigned long end_addr)
{
struct mmu_notifier_range range;
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+ do {
+ BUG_ON(vma->vm_start < start_addr);
+ BUG_ON(vma->vm_end > end_addr);
unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
+ } while ((vma = mas_find(mas, end_addr - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
@@ -1530,17 +1542,20 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
{
struct mmu_notifier_range range;
struct mmu_gather tlb;
+ unsigned long end = start + size;
+ MA_STATE(mas, &vma->vm_mm->mm_mt, start, start);
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
- start, start + size);
- tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
+ start, end);
+ tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
- unmap_single_vma(&tlb, vma, start, range.end, NULL);
+ do {
+ unmap_single_vma(&tlb, vma, start, end, NULL);
+ } while ((vma = mas_find(&mas, end - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
- tlb_finish_mmu(&tlb, start, range.end);
+ tlb_finish_mmu(&tlb, start, end);
}
/**
@@ -4889,8 +4904,8 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
- vma = find_vma(mm, addr);
- if (!vma || vma->vm_start > addr)
+ vma = find_vma_intersection(mm, addr, addr + 1);
+ if (!vma)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,