From cd62734ca60dbb2ab5bb19c8d837dd9990955310 Mon Sep 17 00:00:00 2001
From: Alistair Popple <apopple@nvidia.com>
Date: Wed, 30 Jun 2021 18:54:12 -0700
Subject: mm/rmap: split try_to_munlock from try_to_unmap

The behaviour of try_to_unmap_one() is difficult to follow because it
performs different operations based on a fairly large set of flags used
in different combinations. TTU_MUNLOCK is one such flag. However, it is
exclusively used by try_to_munlock(), which specifies no other flags.
Therefore, rather than overload try_to_unmap_one() with unrelated
behaviour, split this out into its own function and remove the flag.

Link: https://lkml.kernel.org/r/20210616105937.23201-4-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ralph Campbell
Reviewed-by: Christoph Hellwig
Cc: Ben Skeggs
Cc: Hugh Dickins
Cc: Jason Gunthorpe
Cc: John Hubbard
Cc: "Matthew Wilcox (Oracle)"
Cc: Peter Xu
Cc: Shakeel Butt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 49 insertions(+), 17 deletions(-)

(limited to 'mm/rmap.c')

diff --git a/mm/rmap.c b/mm/rmap.c
index b9986c8db524..73c0ff1c73ab 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1411,10 +1411,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if (flags & TTU_SYNC)
 		pvmw.flags = PVMW_SYNC;
 
-	/* munlock has nothing to gain from examining un-locked vmas */
-	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-		return true;
-
 	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
 	    is_zone_device_page(page) && !is_device_private_page(page))
 		return true;
@@ -1476,8 +1472,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
-			if (flags & TTU_MUNLOCK)
-				continue;
 		}
 
 		/* Unexpected PMD-mapped THP? */
@@ -1790,20 +1784,58 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
 		rmap_walk(page, &rwc);
 }
 
+/*
+ * Walks the vma's mapping a page and mlocks the page if any locked vma's are
+ * found. Once one is found the page is locked and the scan can be terminated.
+ */
+static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
+				 unsigned long address, void *unused)
+{
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.address = address,
+	};
+
+	/* An un-locked vma doesn't have any pages to lock, continue the scan */
+	if (!(vma->vm_flags & VM_LOCKED))
+		return true;
+
+	while (page_vma_mapped_walk(&pvmw)) {
+		/*
+		 * Need to recheck under the ptl to serialise with
+		 * __munlock_pagevec_fill() after VM_LOCKED is cleared in
+		 * munlock_vma_pages_range().
+		 */
+		if (vma->vm_flags & VM_LOCKED) {
+			/* PTE-mapped THP are never mlocked */
+			if (!PageTransCompound(page))
+				mlock_vma_page(page);
+			page_vma_mapped_walk_done(&pvmw);
+		}
+
+		/*
+		 * no need to continue scanning other vma's if the page has
+		 * been locked.
+		 */
+		return false;
+	}
+
+	return true;
+}
+
 /**
- * try_to_munlock - try to munlock a page
- * @page: the page to be munlocked
+ * page_mlock - try to mlock a page
+ * @page: the page to be mlocked
  *
- * Called from munlock code. Checks all of the VMAs mapping the page
- * to make sure nobody else has this page mlocked. The page will be
- * returned with PG_mlocked cleared if no other vmas have it mlocked.
+ * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
+ * the page if any are found. The page will be returned with PG_mlocked cleared
+ * if it is not mapped by any locked vmas.
  */
-
-void try_to_munlock(struct page *page)
+void page_mlock(struct page *page)
 {
 	struct rmap_walk_control rwc = {
-		.rmap_one = try_to_unmap_one,
-		.arg = (void *)TTU_MUNLOCK,
+		.rmap_one = page_mlock_one,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
 
@@ -1855,7 +1887,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
@@ -1908,7 +1940,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
--
cgit v1.2.3
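
For readers tracing the refactor, the substance of the change is the
rmap_walk_control wiring: before, the munlock path reused try_to_unmap_one()
and selected the munlock behaviour via TTU_MUNLOCK smuggled through .arg;
after, it installs a dedicated .rmap_one callback and the flag disappears.
The sketch below restates that wiring side by side. The field values are taken
verbatim from the diff above; the two wrapper functions, munlock_walk_before()
and munlock_walk_after(), are hypothetical names added only so each version
forms a well-formed unit, not helpers that exist in the kernel.

/* Before: munlock rode on the unmap callback, selected by a flag. */
static void munlock_walk_before(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,	/* overloads the unmap path */
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	rmap_walk(page, &rwc);
}

/* After: a dedicated callback, so no flag and no unrelated branches. */
static void munlock_walk_after(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = page_mlock_one,	/* mlock-specific walk */
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	rmap_walk(page, &rwc);
}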