Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 535
1 file changed, 347 insertions, 188 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index dd43373a483f..fcd593c9c997 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
 #include <linux/swapops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
@@ -61,17 +62,28 @@
 #include "internal.h"
 
 static struct kmem_cache *anon_vma_cachep;
+static struct kmem_cache *anon_vma_chain_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
 	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
-static inline void anon_vma_free(struct anon_vma *anon_vma)
+void anon_vma_free(struct anon_vma *anon_vma)
 {
 	kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
+static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+{
+	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+}
+
+void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+{
+	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
+}
+
 /**
  * anon_vma_prepare - attach an anon_vma to a memory region
  * @vma: the memory region in question
@@ -102,18 +114,23 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
+	struct anon_vma_chain *avc;
 
 	might_sleep();
 	if (unlikely(!anon_vma)) {
 		struct mm_struct *mm = vma->vm_mm;
 		struct anon_vma *allocated;
 
+		avc = anon_vma_chain_alloc();
+		if (!avc)
+			goto out_enomem;
+
 		anon_vma = find_mergeable_anon_vma(vma);
 		allocated = NULL;
 		if (!anon_vma) {
 			anon_vma = anon_vma_alloc();
 			if (unlikely(!anon_vma))
-				return -ENOMEM;
+				goto out_enomem_free_avc;
 			allocated = anon_vma;
 		}
 		spin_lock(&anon_vma->lock);
@@ -122,67 +139,140 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
 			vma->anon_vma = anon_vma;
-			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+			avc->anon_vma = anon_vma;
+			avc->vma = vma;
+			list_add(&avc->same_vma, &vma->anon_vma_chain);
+			list_add(&avc->same_anon_vma, &anon_vma->head);
 			allocated = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
 		spin_unlock(&anon_vma->lock);
 
-		if (unlikely(allocated))
+		if (unlikely(allocated)) {
 			anon_vma_free(allocated);
+			anon_vma_chain_free(avc);
+		}
 	}
 	return 0;
+
+ out_enomem_free_avc:
+	anon_vma_chain_free(avc);
+ out_enomem:
+	return -ENOMEM;
 }
 
-void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
+static void anon_vma_chain_link(struct vm_area_struct *vma,
+				struct anon_vma_chain *avc,
+				struct anon_vma *anon_vma)
 {
-	BUG_ON(vma->anon_vma != next->anon_vma);
-	list_del(&next->anon_vma_node);
+	avc->vma = vma;
+	avc->anon_vma = anon_vma;
+	list_add(&avc->same_vma, &vma->anon_vma_chain);
+
+	spin_lock(&anon_vma->lock);
+	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
+	spin_unlock(&anon_vma->lock);
 }
 
-void __anon_vma_link(struct vm_area_struct *vma)
+/*
+ * Attach the anon_vmas from src to dst.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
-	struct anon_vma *anon_vma = vma->anon_vma;
+	struct anon_vma_chain *avc, *pavc;
+
+	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
+		avc = anon_vma_chain_alloc();
+		if (!avc)
+			goto enomem_failure;
+		anon_vma_chain_link(dst, avc, pavc->anon_vma);
+	}
+	return 0;
 
-	if (anon_vma)
-		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ enomem_failure:
+	unlink_anon_vmas(dst);
+	return -ENOMEM;
 }
 
-void anon_vma_link(struct vm_area_struct *vma)
+/*
+ * Attach vma to its own anon_vma, as well as to the anon_vmas that
+ * the corresponding VMA in the parent process is attached to.
+ * Returns 0 on success, non-zero on failure.
+ */
+int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
-	struct anon_vma *anon_vma = vma->anon_vma;
+	struct anon_vma_chain *avc;
+	struct anon_vma *anon_vma;
 
-	if (anon_vma) {
-		spin_lock(&anon_vma->lock);
-		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-		spin_unlock(&anon_vma->lock);
-	}
+	/* Don't bother if the parent process has no anon_vma here. */
+	if (!pvma->anon_vma)
+		return 0;
+
+	/*
+	 * First, attach the new VMA to the parent VMA's anon_vmas,
+	 * so rmap can find non-COWed pages in child processes.
+	 */
+	if (anon_vma_clone(vma, pvma))
+		return -ENOMEM;
+
+	/* Then add our own anon_vma. */
+	anon_vma = anon_vma_alloc();
+	if (!anon_vma)
+		goto out_error;
+	avc = anon_vma_chain_alloc();
+	if (!avc)
+		goto out_error_free_anon_vma;
+	anon_vma_chain_link(vma, avc, anon_vma);
+	/* Mark this anon_vma as the one where our new (COWed) pages go. */
+	vma->anon_vma = anon_vma;
+
+	return 0;
+
+ out_error_free_anon_vma:
+	anon_vma_free(anon_vma);
+ out_error:
+	return -ENOMEM;
 }
 
-void anon_vma_unlink(struct vm_area_struct *vma)
+static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 {
-	struct anon_vma *anon_vma = vma->anon_vma;
+	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
 	int empty;
 
+	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
 	if (!anon_vma)
 		return;
 
 	spin_lock(&anon_vma->lock);
-	list_del(&vma->anon_vma_node);
+	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head);
+	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
 	spin_unlock(&anon_vma->lock);
 
 	if (empty)
 		anon_vma_free(anon_vma);
 }
 
+void unlink_anon_vmas(struct vm_area_struct *vma)
+{
+	struct anon_vma_chain *avc, *next;
+
+	/* Unlink each anon_vma chained to the VMA. */
+	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
+		anon_vma_chain_free(avc);
+	}
+}
+
 static void anon_vma_ctor(void *data)
 {
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
+	ksm_refcount_init(anon_vma);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -190,6 +280,7 @@ void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
+	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
 }
 
 /*
@@ -202,8 +293,8 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long) page->mapping;
-	if (!(anon_mapping & PAGE_MAPPING_ANON))
+	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
 	if (!page_mapped(page))
 		goto out;
@@ -248,8 +339,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	if (PageAnon(page)) {
-		if ((void *)vma->anon_vma !=
-		    (void *)page->mapping - PAGE_MAPPING_ANON)
+		if (vma->anon_vma != page_anon_vma(page))
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
@@ -337,21 +427,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
-static int page_referenced_one(struct page *page,
-			       struct vm_area_struct *vma,
-			       unsigned int *mapcount,
-			       unsigned long *vm_flags)
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, unsigned int *mapcount,
+			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int referenced = 0;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -388,9 +472,10 @@ static int page_referenced_one(struct page *page,
 out_unmap:
 	(*mapcount)--;
 	pte_unmap_unlock(pte, ptl);
-out:
+
 	if (referenced)
 		*vm_flags |= vma->vm_flags;
+out:
 	return referenced;
 }
 
@@ -400,7 +485,7 @@ static int page_referenced_anon(struct page *page,
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
-	struct vm_area_struct *vma;
+	struct anon_vma_chain *avc;
 	int referenced = 0;
 
 	anon_vma = page_lock_anon_vma(page);
@@ -408,7 +493,11 @@ static int page_referenced_anon(struct page *page,
 		return referenced;
 
 	mapcount = page_mapcount(page);
-	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -416,7 +505,7 @@ static int page_referenced_anon(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma,
+		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
@@ -474,6 +563,9 @@ static int page_referenced_file(struct page *page,
 	mapcount = page_mapcount(page);
 
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -481,7 +573,7 @@ static int page_referenced_file(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma,
+		referenced += page_referenced_one(page, vma, address,
 						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
@@ -507,46 +599,44 @@ int page_referenced(struct page *page,
 		    unsigned long *vm_flags)
 {
 	int referenced = 0;
-
-	if (TestClearPageReferenced(page))
-		referenced++;
+	int we_locked = 0;
 
 	*vm_flags = 0;
-	if (page_mapped(page) && page->mapping) {
-		if (PageAnon(page))
+	if (page_mapped(page) && page_rmapping(page)) {
+		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+			we_locked = trylock_page(page);
+			if (!we_locked) {
+				referenced++;
+				goto out;
+			}
+		}
+		if (unlikely(PageKsm(page)))
+			referenced += page_referenced_ksm(page, mem_cont,
+								vm_flags);
+		else if (PageAnon(page))
 			referenced += page_referenced_anon(page, mem_cont,
 								vm_flags);
-		else if (is_locked)
+		else if (page->mapping)
 			referenced += page_referenced_file(page, mem_cont,
 								vm_flags);
-		else if (!trylock_page(page))
-			referenced++;
-		else {
-			if (page->mapping)
-				referenced += page_referenced_file(page,
-							mem_cont, vm_flags);
+		if (we_locked)
 			unlock_page(page);
-		}
 	}
-
+out:
 	if (page_test_and_clear_young(page))
 		referenced++;
 
 	return referenced;
 }
 
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+			    unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 1);
 	if (!pte)
 		goto out;
@@ -578,8 +668,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (vma->vm_flags & VM_SHARED)
-			ret += page_mkclean_one(page, vma);
+		if (vma->vm_flags & VM_SHARED) {
+			unsigned long address = vma_address(page, vma);
+			if (address == -EFAULT)
+				continue;
+			ret += page_mkclean_one(page, vma, address);
+		}
 	}
 	spin_unlock(&mapping->i_mmap_lock);
 	return ret;
@@ -607,6 +701,30 @@ int page_mkclean(struct page *page)
 EXPORT_SYMBOL_GPL(page_mkclean);
 
 /**
+ * page_move_anon_rmap - move a page to our anon_vma
+ * @page: the page to move to our anon_vma
+ * @vma: the vma the page belongs to
+ * @address: the user virtual address mapped
+ *
+ * When a page belongs exclusively to one process after a COW event,
+ * that page can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling
+ * processes.
+ */
+void page_move_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!anon_vma);
+	VM_BUG_ON(page->index != linear_page_index(vma, address));
+
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+}
+
+/**
  * __page_set_anon_rmap - setup new anonymous rmap
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
@@ -620,14 +738,7 @@ static void __page_set_anon_rmap(struct page *page,
 	BUG_ON(!anon_vma);
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
-
 	page->index = linear_page_index(vma, address);
-
-	/*
-	 * nr_mapped state can be updated without turning off
-	 * interrupts because it is not modified via interrupt.
-	 */
-	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -652,9 +763,6 @@ static void __page_check_anon_rmap(struct page *page,
 	 * are initially only visible via the pagetables, and the pte is locked
 	 * over the call to page_add_new_anon_rmap.
 	 */
-	struct anon_vma *anon_vma = vma->anon_vma;
-	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	BUG_ON(page->mapping != (struct address_space *)anon_vma);
 	BUG_ON(page->index != linear_page_index(vma, address));
 #endif
 }
@@ -665,14 +773,23 @@ static void __page_check_anon_rmap(struct page *page,
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
 */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	int first = atomic_inc_and_test(&page->_mapcount);
+	if (first)
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (unlikely(PageKsm(page)))
+		return;
+
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-	if (atomic_inc_and_test(&page->_mapcount))
+	if (first)
 		__page_set_anon_rmap(page, vma, address);
 	else
 		__page_check_anon_rmap(page, vma, address);
@@ -694,6 +811,7 @@ void page_add_new_anon_rmap(struct page *page,
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 	__page_set_anon_rmap(page, vma, address);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -711,7 +829,7 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, 1);
+		mem_cgroup_update_file_mapped(page, 1);
 	}
 }
 
@@ -743,8 +861,8 @@ void page_remove_rmap(struct page *page)
 		__dec_zone_page_state(page, NR_ANON_PAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		mem_cgroup_update_file_mapped(page, -1);
 	}
-	mem_cgroup_update_mapped_file_stat(page, -1);
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -760,20 +878,15 @@ void page_remove_rmap(struct page *page)
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-				enum ttu_flags flags)
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+		     unsigned long address, enum ttu_flags flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)
-		goto out;
-
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -784,10 +897,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * skipped over this mm) then we should reactivate it.
 	 */
 	if (!(flags & TTU_IGNORE_MLOCK)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			ret = SWAP_MLOCK;
+		if (vma->vm_flags & VM_LOCKED)
+			goto out_mlock;
+
+		if (TTU_ACTION(flags) == TTU_MUNLOCK)
 			goto out_unmap;
-		}
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -809,9 +923,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
 		if (PageAnon(page))
-			dec_mm_counter(mm, anon_rss);
+			dec_mm_counter(mm, MM_ANONPAGES);
 		else
-			dec_mm_counter(mm, file_rss);
+			dec_mm_counter(mm, MM_FILEPAGES);
 		set_pte_at(mm, address, pte,
 			   swp_entry_to_pte(make_hwpoison_entry(page)));
 	} else if (PageAnon(page)) {
@@ -822,14 +936,19 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * Store the swap location in the pte.
 			 * See handle_pte_fault() ...
 			 */
-			swap_duplicate(entry);
+			if (swap_duplicate(entry) < 0) {
+				set_pte_at(mm, address, pte, pteval);
+				ret = SWAP_FAIL;
+				goto out_unmap;
+			}
 			if (list_empty(&mm->mmlist)) {
 				spin_lock(&mmlist_lock);
 				if (list_empty(&mm->mmlist))
 					list_add(&mm->mmlist, &init_mm.mmlist);
 				spin_unlock(&mmlist_lock);
 			}
-			dec_mm_counter(mm, anon_rss);
+			dec_mm_counter(mm, MM_ANONPAGES);
+			inc_mm_counter(mm, MM_SWAPENTS);
 		} else if (PAGE_MIGRATION) {
 			/*
 			 * Store the pfn of the page in a special migration
@@ -847,8 +966,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			entry = make_migration_entry(page, pte_write(pteval));
 			set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		} else
-			dec_mm_counter(mm, file_rss);
-
+			dec_mm_counter(mm, MM_FILEPAGES);
 
 	page_remove_rmap(page);
 	page_cache_release(page);
@@ -857,6 +975,27 @@ out_unmap:
 	pte_unmap_unlock(pte, ptl);
 out:
 	return ret;
+
+out_mlock:
+	pte_unmap_unlock(pte, ptl);
+
+
+	/*
+	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
+	 * unstable result and race. Plus, We can't wait here because
+	 * we now hold anon_vma->lock or mapping->i_mmap_lock.
+	 * if trylock failed, the page remain in evictable lru and later
+	 * vmscan could retry to move the page to unevictable lru if the
+	 * page is actually mlocked.
+	 */
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+		if (vma->vm_flags & VM_LOCKED) {
+			mlock_vma_page(page);
+			ret = SWAP_MLOCK;
+		}
+		up_read(&vma->vm_mm->mmap_sem);
+	}
+	return ret;
 }
 
 /*
@@ -922,11 +1061,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		return ret;
 
 	/*
-	 * MLOCK_PAGES => feature is configured.
-	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
 	 */
-	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 		locked_vma = (vma->vm_flags & VM_LOCKED);
 		if (!locked_vma)
 			up_read(&vma->vm_mm->mmap_sem);	/* don't need it */
@@ -967,7 +1105,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
 		page_remove_rmap(page);
 		page_cache_release(page);
-		dec_mm_counter(mm, file_rss);
+		dec_mm_counter(mm, MM_FILEPAGES);
 		(*mapcount)--;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
@@ -976,29 +1114,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-	int mlocked = 0;
-
-	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			mlock_vma_page(page);
-			mlocked++;	/* really mlocked the page */
-		}
-		up_read(&vma->vm_mm->mmap_sem);
-	}
-	return mlocked;
-}
-
 /**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
@@ -1013,43 +1133,24 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
-	struct vm_area_struct *vma;
-	unsigned int mlocked = 0;
+	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
-	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;  /* must visit all unlocked vmas */
-			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				break;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;	/* stop if actually mlocked page */
-		}
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = try_to_unmap_one(page, vma, address, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			break;
 	}
 
 	page_unlock_anon_vma(anon_vma);
-
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
-
 	return ret;
 }
 
@@ -1079,48 +1180,30 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned int mlocked = 0;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				goto out;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;  /* stop if actually mlocked page */
-		}
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = try_to_unmap_one(page, vma, address, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
-	if (mlocked)
+	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
-	if (list_empty(&mapping->i_mmap_nonlinear))
+	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
+	 * It's costly. Instead, later, page reclaim logic may call
+	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+	 */
+	if (TTU_ACTION(flags) == TTU_MUNLOCK)
 		goto out;
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!(vma->vm_flags & VM_LOCKED))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
-			goto out;		/* no need to look further */
-		}
-		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-			(vma->vm_flags & VM_LOCKED))
-			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
 			max_nl_cursor = cursor;
@@ -1153,16 +1236,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-					(vma->vm_flags & VM_LOCKED))
-				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
 				cursor < vma->vm_end - vma->vm_start) {
-				ret = try_to_unmap_cluster(cursor, &mapcount,
-								vma, page);
-				if (ret == SWAP_MLOCK)
-					mlocked = 2;	/* to return below */
+				if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+					ret = SWAP_MLOCK;
 				cursor += CLUSTER_SIZE;
 				vma->vm_private_data = (void *) cursor;
 				if ((int)mapcount <= 0)
@@ -1183,10 +1262,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
 
@@ -1210,7 +1285,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 
 	BUG_ON(!PageLocked(page));
 
-	if (PageAnon(page))
+	if (unlikely(PageKsm(page)))
+		ret = try_to_unmap_ksm(page, flags);
+	else if (PageAnon(page))
 		ret = try_to_unmap_anon(page, flags);
 	else
 		ret = try_to_unmap_file(page, flags);
@@ -1229,17 +1306,99 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 *
 * Return values are:
 *
- * SWAP_SUCCESS	- no vma's holding page mlocked.
+ * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
 int try_to_munlock(struct page *page)
 {
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
-	if (PageAnon(page))
+	if (unlikely(PageKsm(page)))
+		return try_to_unmap_ksm(page, TTU_MUNLOCK);
+	else if (PageAnon(page))
 		return try_to_unmap_anon(page, TTU_MUNLOCK);
 	else
 		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+	struct anon_vma *anon_vma;
+	struct anon_vma_chain *avc;
+	int ret = SWAP_AGAIN;
+
+	/*
+	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+	 * because that depends on page_mapped(); but not all its usages
+	 * are holding mmap_sem, which also gave the necessary guarantee
+	 * (that this anon_vma's slab has not already been destroyed).
+	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
+	 * is risky, and currently limits the usefulness of rmap_walk().
+	 */
+	anon_vma = page_anon_vma(page);
+	if (!anon_vma)
+		return ret;
+	spin_lock(&anon_vma->lock);
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = rmap_one(page, vma, address, arg);
+		if (ret != SWAP_AGAIN)
+			break;
+	}
+	spin_unlock(&anon_vma->lock);
+	return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+	struct address_space *mapping = page->mapping;
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	int ret = SWAP_AGAIN;
+
+	if (!mapping)
+		return ret;
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long address = vma_address(page, vma);
+		if (address == -EFAULT)
+			continue;
+		ret = rmap_one(page, vma, address, arg);
+		if (ret != SWAP_AGAIN)
+			break;
+	}
+	/*
+	 * No nonlinear handling: being always shared, nonlinear vmas
+	 * never contain migration ptes.  Decide what to do about this
+	 * limitation to linear when we need rmap_walk() on nonlinear.
+	 */
+	spin_unlock(&mapping->i_mmap_lock);
+	return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+	VM_BUG_ON(!PageLocked(page));
+
+	if (unlikely(PageKsm(page)))
+		return rmap_walk_ksm(page, rmap_one, arg);
+	else if (PageAnon(page))
+		return rmap_walk_anon(page, rmap_one, arg);
+	else
+		return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
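The anon_vma_chain API added by this diff (anon_vma_clone(), anon_vma_fork(), unlink_anon_vmas()) is driven by the VMA lifecycle code outside mm/rmap.c. The following is a minimal illustrative sketch, not part of this diff, of the intended calling pattern when a VMA is duplicated (for example during fork) and later torn down; the helper names copy_vma_example() and remove_vma_example() are hypothetical, and the surrounding allocation/copy of the VMA is assumed to have been done already.

/*
 * Illustrative sketch only -- not part of mm/rmap.c. Shows how a caller
 * would be expected to use the anon_vma_chain API introduced above,
 * assuming the usual kernel headers and an already-copied child VMA.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical helper: give a child VMA its rmap linkage during fork. */
static int copy_vma_example(struct vm_area_struct *new_vma,
			    struct vm_area_struct *parent_vma)
{
	/* Start with an empty chain; anon_vma_fork() appends to it. */
	INIT_LIST_HEAD(&new_vma->anon_vma_chain);

	/*
	 * anon_vma_fork() first clones the parent's chain (so rmap can
	 * still find pages that have not been COWed yet) and then adds
	 * a fresh anon_vma for pages COWed into the child.
	 */
	if (anon_vma_fork(new_vma, parent_vma))
		return -ENOMEM;

	return 0;
}

/* Hypothetical teardown side: drop every chain entry before freeing the VMA. */
static void remove_vma_example(struct vm_area_struct *vma)
{
	unlink_anon_vmas(vma);
}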