author    | Sean Paul <seanpaul@chromium.org> | 2017-05-18 09:24:30 -0400
committer | Sean Paul <seanpaul@chromium.org> | 2017-05-18 09:24:30 -0400
commit    | 6b7781b42dc9bc9bcd1523b6c24b876cdda0bef3 (patch)
tree      | ee55c67e4ea30b9eb44f301ba0bde2e631a26162 /mm/truncate.c
parent    | 52d9d38c183bf0e09601d875ea31bb53c05dd8cf (diff)
parent    | e98c58e55f68f8785aebfab1f8c9a03d8de0afe1 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next
Picking up drm-next @ 4.12-rc1 in order to apply Michal Hocko's vmalloc patch set
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'mm/truncate.c')
-rw-r--r-- | mm/truncate.c | 32
1 file changed, 22 insertions, 10 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index 6263affdef88..6479ed2afc53 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -67,17 +67,14 @@ static void truncate_exceptional_entry(struct address_space *mapping,

 /*
  * Invalidate exceptional entry if easily possible. This handles exceptional
- * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
- * clean entries.
+ * entries for invalidate_inode_pages().
  */
 static int invalidate_exceptional_entry(struct address_space *mapping,
                                         pgoff_t index, void *entry)
 {
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
+       /* Handled by shmem itself, or for DAX we do nothing. */
+       if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
-       if (dax_mapping(mapping))
-               return dax_invalidate_mapping_entry(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
 }
@@ -266,9 +263,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
        pgoff_t         index;
        int             i;

-       cleancache_invalidate_inode(mapping);
        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
-               return;
+               goto out;

        /* Offsets within partial pages */
        partial_start = lstart & (PAGE_SIZE - 1);
@@ -363,7 +359,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         * will be released, just zeroed, so we can bail out now.
         */
        if (start >= end)
-               return;
+               goto out;

        index = start;
        for ( ; ; ) {
@@ -410,6 +406,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                pagevec_release(&pvec);
                index++;
        }
+
+out:
        cleancache_invalidate_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -623,7 +621,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        int ret2 = 0;
        int did_range_unmap = 0;

-       cleancache_invalidate_inode(mapping);
+       if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
+               goto out;
+
        pagevec_init(&pvec, 0);
        index = start;
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
@@ -686,6 +686,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                cond_resched();
                index++;
        }
+       /*
+        * For DAX we invalidate page tables after invalidating radix tree. We
+        * could invalidate page tables while invalidating each entry however
+        * that would be expensive. And doing range unmapping before doesn't
+        * work as we have no cheap way to find whether radix tree entry didn't
+        * get remapped later.
+        */
+       if (dax_mapping(mapping)) {
+               unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
+                               (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+       }
+out:
        cleancache_invalidate_inode(mapping);
        return ret;
 }
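To see the net control flow of the hunks above in one place: the patch (a) defers cleancache_invalidate_inode() to a single out: label so it still runs when the function bails out early on an empty mapping, and (b) for DAX mappings unmaps the whole range once with unmap_mapping_range() after the radix-tree entries have been invalidated, rather than unmapping entry by entry. The small standalone C program below sketches that same pattern with entirely hypothetical names (toy_mapping, toy_invalidate_range, and friends); it is an illustration of the structure, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two behavioural changes in the hunks above. */
struct toy_mapping {
        unsigned long nrpages;
        unsigned long nrexceptional;
        bool is_dax;
};

static void toy_cleancache_invalidate(struct toy_mapping *m)
{
        printf("cleancache invalidated (nrpages=%lu)\n", m->nrpages);
}

static void toy_unmap_range(unsigned long start, unsigned long end)
{
        printf("unmapping page tables for range [%lu, %lu]\n", start, end);
}

static int toy_invalidate_range(struct toy_mapping *m,
                                unsigned long start, unsigned long end)
{
        unsigned long invalidated = 0;
        int ret = 0;

        if (m->nrpages == 0 && m->nrexceptional == 0)
                goto out;       /* early exit; cleanup below still runs */

        for (unsigned long i = start; i <= end && i < m->nrpages; i++)
                invalidated++;  /* stand-in for the per-entry invalidation loop */
        printf("invalidated %lu entries\n", invalidated);

        /* Range unmap happens once, after the entries have been invalidated. */
        if (m->is_dax)
                toy_unmap_range(start, end);
out:
        toy_cleancache_invalidate(m);   /* single exit point */
        return ret;
}

int main(void)
{
        struct toy_mapping empty = { 0, 0, false };
        struct toy_mapping dax   = { 16, 0, true };

        toy_invalidate_range(&empty, 0, 7);     /* empty mapping: cleanup only */
        toy_invalidate_range(&dax, 0, 7);       /* DAX-like: deferred unmap, then cleanup */
        return 0;
}

The point of funnelling every return through the single out: label is that the cleanup call cannot be skipped by any early-exit path, which is exactly why cleancache_invalidate_inode() is moved to the bottom of both functions in the diff.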