summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2016-09-19 17:11:53 -0800
committerKent Overstreet <kent.overstreet@gmail.com>2016-09-19 17:11:53 -0800
commit6636e50b340bd13812164645ce00879593b0860f (patch)
tree87d3cabaf3574855f7d7833983245f94e3194bf3
parent2a3d115fac9840d927ad6cb9aa9e34ea35cb4f5d (diff)
mm: Real pagecache iterators
Introduce for_each_pagecache_page() and related macros, with the goal of replacing most/all uses of pagevec_lookup(). For the most part this shouldn't be a functional change. The one functional difference with the new macros is that they now take an @end parameter, so we're able to avoid grabbing pages in __find_get_pages() that we'll never use. This patch only does some of the conversions, the ones I was able to easily test myself - the conversions are mechanical but tricky enough they generally warrant testing. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--include/linux/pagevec.h67
-rw-r--r--mm/filemap.c76
-rw-r--r--mm/page-writeback.c148
-rw-r--r--mm/swap.c33
-rw-r--r--mm/truncate.c259
5 files changed, 274 insertions, 309 deletions
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391b4540..e60d74148d0b 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -22,10 +22,6 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_entries,
- pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
@@ -69,4 +65,67 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec);
}
+struct pagecache_iter {
+ unsigned nr;
+ unsigned idx;
+ pgoff_t index;
+ struct page *pages[PAGEVEC_SIZE];
+ pgoff_t indices[PAGEVEC_SIZE];
+};
+
+static inline void pagecache_iter_init(struct pagecache_iter *iter,
+ pgoff_t start)
+{
+ iter->nr = 0;
+ iter->idx = 0;
+ iter->index = start;
+}
+
+void __pagecache_iter_release(struct pagecache_iter *iter);
+
+/**
+ * pagecache_iter_release - release cached pages from pagecache_iter
+ *
+ * Must be called if breaking out of for_each_pagecache_page() etc. early - not
+ * needed if pagecache_iter_next() returned NULL and loop terminated normally
+ */
+static inline void pagecache_iter_release(struct pagecache_iter *iter)
+{
+ if (iter->nr)
+ __pagecache_iter_release(iter);
+}
+
+struct page *pagecache_iter_next(struct pagecache_iter *iter,
+ struct address_space *mapping,
+ pgoff_t end, pgoff_t *index,
+ unsigned flags);
+
+#define __pagecache_iter_for_each(_iter, _mapping, _start, _end, \
+ _page, _index, _flags) \
+ for (pagecache_iter_init((_iter), (_start)); \
+ ((_page) = pagecache_iter_next((_iter), (_mapping), \
+ (_end), (_index), (_flags)));)
+
+#define for_each_pagecache_page(_iter, _mapping, _start, _end, _page) \
+ __pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+ (_page), NULL, 0)
+
+#define for_each_pagecache_page_contig(_iter, _mapping, _start, _end, _page)\
+ __pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+ (_page), NULL, RADIX_TREE_ITER_CONTIG)
+
+#define for_each_pagecache_tag(_iter, _mapping, _tag, _start, _end, _page)\
+ __pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+ (_page), NULL, RADIX_TREE_ITER_TAGGED|(_tag))
+
+#define for_each_pagecache_entry(_iter, _mapping, _start, _end, _page, _index)\
+ __pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+ (_page), &(_index), RADIX_TREE_ITER_EXCEPTIONAL)
+
+#define for_each_pagecache_entry_tag(_iter, _mapping, _tag, \
+ _start, _end, _page, _index) \
+ __pagecache_iter_for_each((_iter), (_mapping), (_start), (_end),\
+ (_page), &(_index), RADIX_TREE_ITER_EXCEPTIONAL|\
+ RADIX_TREE_ITER_TAGGED|(_tag))
+
#endif /* _LINUX_PAGEVEC_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index 1247fde1d1f6..bba724f24ba9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -404,35 +404,20 @@ EXPORT_SYMBOL(filemap_flush);
static int __filemap_fdatawait_range(struct address_space *mapping,
loff_t start_byte, loff_t end_byte)
{
- pgoff_t index = start_byte >> PAGE_SHIFT;
+ pgoff_t start = start_byte >> PAGE_SHIFT;
pgoff_t end = end_byte >> PAGE_SHIFT;
- struct pagevec pvec;
- int nr_pages;
+ struct pagecache_iter iter;
+ struct page *page;
int ret = 0;
if (end_byte < start_byte)
goto out;
- pagevec_init(&pvec, 0);
- while ((index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_WRITEBACK,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
- unsigned i;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- /* until radix tree lookup accepts end_index */
- if (page->index > end)
- continue;
-
- wait_on_page_writeback(page);
- if (TestClearPageError(page))
- ret = -EIO;
- }
- pagevec_release(&pvec);
- cond_resched();
+ for_each_pagecache_tag(&iter, mapping, PAGECACHE_TAG_WRITEBACK,
+ start, end, page) {
+ wait_on_page_writeback(page);
+ if (TestClearPageError(page))
+ ret = -EIO;
}
out:
return ret;
@@ -1388,6 +1373,51 @@ no_entry:
}
EXPORT_SYMBOL(__find_get_pages);
+void __pagecache_iter_release(struct pagecache_iter *iter)
+{
+ lru_add_drain();
+ release_pages(iter->pages, iter->nr, 0);
+ iter->nr = 0;
+ iter->idx = 0;
+}
+EXPORT_SYMBOL(__pagecache_iter_release);
+
+/**
+ * pagecache_iter_next - get next page from pagecache iterator and advance
+ * iterator
+ * @iter: The iterator to advance
+ * @mapping: The address_space to search
+ * @end: Page cache index to stop at (inclusive)
+ * @index: if non NULL, index of page or entry will be returned here
+ * @flags: radix tree iter flags and tag for __find_get_pages()
+ */
+struct page *pagecache_iter_next(struct pagecache_iter *iter,
+ struct address_space *mapping,
+ pgoff_t end, pgoff_t *index,
+ unsigned flags)
+{
+ struct page *page;
+
+ if (iter->idx >= iter->nr) {
+ pagecache_iter_release(iter);
+ cond_resched();
+
+ iter->nr = __find_get_pages(mapping, iter->index, end,
+ PAGEVEC_SIZE, iter->pages,
+ iter->indices, flags);
+ if (!iter->nr)
+ return NULL;
+ }
+
+ iter->index = iter->indices[iter->idx] + 1;
+ if (index)
+ *index = iter->indices[iter->idx];
+ page = iter->pages[iter->idx];
+ iter->idx++;
+ return page;
+}
+EXPORT_SYMBOL(pagecache_iter_next);
+
/*
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
* a _large_ part of the i/o request. Imagine the worst scenario:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c263252c14af..458eaf7b40ac 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2163,10 +2163,10 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data)
{
+ struct pagecache_iter iter;
+ struct page *page;
int ret = 0;
int done = 0;
- struct pagevec pvec;
- int nr_pages;
pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
pgoff_t end; /* Inclusive */
@@ -2175,7 +2175,6 @@ int write_cache_pages(struct address_space *mapping,
int range_whole = 0;
int tag;
- pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -2198,105 +2197,80 @@ int write_cache_pages(struct address_space *mapping,
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
- done_index = index;
- while (!done && (index <= end)) {
- int i;
-
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- /*
- * At this point, the page may be truncated or
- * invalidated (changing page->mapping to NULL), or
- * even swizzled back from swapper_space to tmpfs file
- * mapping. However, page->index will not change
- * because we have a reference on the page.
- */
- if (page->index > end) {
- /*
- * can't be range_cyclic (1st pass) because
- * end == -1 in that case.
- */
- done = 1;
- break;
- }
+ done_index = index;
- done_index = page->index;
+ for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
+ done_index = page->index;
- lock_page(page);
+ lock_page(page);
- /*
- * Page truncated or invalidated. We can freely skip it
- * then, even for data integrity operations: the page
- * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
- * even if there is now a new, dirty page at the same
- * pagecache address.
- */
- if (unlikely(page->mapping != mapping)) {
+ /*
+ * Page truncated or invalidated. We can freely skip it
+ * then, even for data integrity operations: the page
+ * has disappeared concurrently, so there could be no
+ * real expectation of this data integrity operation
+ * even if there is now a new, dirty page at the same
+ * pagecache address.
+ */
+ if (unlikely(page->mapping != mapping)) {
continue_unlock:
- unlock_page(page);
- continue;
- }
-
- if (!PageDirty(page)) {
- /* someone wrote it for us */
- goto continue_unlock;
- }
+ unlock_page(page);
+ continue;
+ }
- if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- else
- goto continue_unlock;
- }
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
- BUG_ON(PageWriteback(page));
- if (!clear_page_dirty_for_io(page))
+ if (PageWriteback(page)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ else
goto continue_unlock;
+ }
- trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
- ret = (*writepage)(page, wbc, data);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
- done_index = page->index + 1;
- done = 1;
- break;
- }
- }
+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
- /*
- * We stop writing back only if we are not doing
- * integrity sync. In case of integrity sync we have to
- * keep going until we have written all the pages
- * we tagged for writeback prior to entering this loop.
- */
- if (--wbc->nr_to_write <= 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
+ ret = (*writepage)(page, wbc, data);
+ if (unlikely(ret)) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ } else {
+ /*
+ * done_index is set past this page,
+ * so media errors will not choke
+ * background writeout for the entire
+ * file. This has consequences for
+ * range_cyclic semantics (ie. it may
+ * not be suitable for data integrity
+ * writeout).
+ */
+ done_index = page->index + 1;
done = 1;
break;
}
}
- pagevec_release(&pvec);
- cond_resched();
+
+ /*
+ * We stop writing back only if we are not doing
+ * integrity sync. In case of integrity sync we have to
+ * keep going until we have written all the pages
+ * we tagged for writeback prior to entering this loop.
+ */
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ done = 1;
+ break;
+ }
}
+ pagecache_iter_release(&iter);
+
if (!cycled && !done) {
/*
* range_cyclic:
diff --git a/mm/swap.c b/mm/swap.c
index 90530ff8ed16..eaabfadc4526 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -736,6 +736,9 @@ void release_pages(struct page **pages, int nr, bool cold)
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
+ if (radix_tree_exceptional_entry(page))
+ continue;
+
/*
* Make sure the IRQ-safe lock-holding time does not get
* excessive with a continuous string of pages from the
@@ -880,36 +883,6 @@ void __pagevec_lru_add(struct pagevec *pvec)
EXPORT_SYMBOL(__pagevec_lru_add);
/**
- * pagevec_lookup_entries - gang pagecache lookup
- * @pvec: Where the resulting entries are placed
- * @mapping: The address_space to search
- * @start: The starting entry index
- * @nr_entries: The maximum number of entries
- * @indices: The cache indices corresponding to the entries in @pvec
- *
- * pagevec_lookup_entries() will search for and return a group of up
- * to @nr_entries pages and shadow entries in the mapping. All
- * entries are placed in @pvec. pagevec_lookup_entries() takes a
- * reference against actual pages in @pvec.
- *
- * The search returns a group of mapping-contiguous entries with
- * ascending indexes. There may be holes in the indices due to
- * not-present entries.
- *
- * pagevec_lookup_entries() returns the number of entries which were
- * found.
- */
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_pages,
- pgoff_t *indices)
-{
- pvec->nr = find_get_entries(mapping, start, nr_pages,
- pvec->pages, indices);
- return pagevec_count(pvec);
-}
-
-/**
* pagevec_remove_exceptionals - pagevec exceptionals pruning
* @pvec: The pagevec to prune
*
diff --git a/mm/truncate.c b/mm/truncate.c
index 4064f8f53daa..afcac43e2ff8 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -229,10 +229,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
pgoff_t end; /* exclusive */
unsigned int partial_start; /* inclusive */
unsigned int partial_end; /* exclusive */
- struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
- pgoff_t index;
- int i;
+ struct pagecache_iter iter;
+ struct page *page;
+ pgoff_t index;
+ bool found;
cleancache_invalidate_inode(mapping);
if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
@@ -248,51 +248,36 @@ void truncate_inode_pages_range(struct address_space *mapping,
* start of the range and 'partial_end' at the end of the range.
* Note that 'end' is exclusive while 'lend' is inclusive.
*/
- start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = round_up(lstart, PAGE_SIZE) >> PAGE_SHIFT;
if (lend == -1)
/*
- * lend == -1 indicates end-of-file so we have to set 'end'
- * to the highest possible pgoff_t and since the type is
- * unsigned we're using -1.
+ * lend == -1 indicates end-of-file so we have to set 'end' to
+ * the highest possible pgoff_t
*/
- end = -1;
+ end = ULONG_MAX;
else
end = (lend + 1) >> PAGE_SHIFT;
- pagevec_init(&pvec, 0);
- index = start;
- while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- indices)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
-
- /* We rely upon deletion not changing page->index */
- index = indices[i];
- if (index >= end)
- break;
+ if (start >= end)
+ goto do_partial;
- if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
- continue;
- }
+ for_each_pagecache_entry(&iter, mapping, start, end - 1, page, index) {
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
- if (!trylock_page(page))
- continue;
- WARN_ON(page->index != index);
- if (PageWriteback(page)) {
- unlock_page(page);
- continue;
- }
- truncate_inode_page(mapping, page);
+ if (!trylock_page(page))
+ continue;
+ WARN_ON(page->index != index);
+ if (PageWriteback(page)) {
unlock_page(page);
+ continue;
}
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- cond_resched();
- index++;
+ truncate_inode_page(mapping, page);
+ unlock_page(page);
}
-
+do_partial:
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
@@ -332,34 +317,12 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (start >= end)
return;
- index = start;
- for ( ; ; ) {
- cond_resched();
- if (!pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
- /* If all gone from start onwards, we're done */
- if (index == start)
- break;
- /* Otherwise restart to make sure all gone */
- index = start;
- continue;
- }
- if (index == start && indices[0] >= end) {
- /* All gone out of hole to be punched, we're done */
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- break;
- }
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
-
- /* We rely upon deletion not changing page->index */
- index = indices[i];
- if (index >= end) {
- /* Restart punch to make sure all gone */
- index = start - 1;
- break;
- }
+ do {
+ found = false;
+
+ for_each_pagecache_entry(&iter, mapping, start,
+ end - 1, page, index) {
+ found = true;
if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
@@ -372,10 +335,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
truncate_inode_page(mapping, page);
unlock_page(page);
}
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- index++;
- }
+ } while (found);
+
cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -461,48 +422,32 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
- pgoff_t indices[PAGEVEC_SIZE];
- struct pagevec pvec;
- pgoff_t index = start;
+ struct pagecache_iter iter;
+ struct page *page;
+ pgoff_t index;
unsigned long ret;
unsigned long count = 0;
- int i;
-
- pagevec_init(&pvec, 0);
- while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
- indices)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- /* We rely upon deletion not changing page->index */
- index = indices[i];
- if (index > end)
- break;
-
- if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
- continue;
- }
-
- if (!trylock_page(page))
- continue;
- WARN_ON(page->index != index);
- ret = invalidate_inode_page(page);
- unlock_page(page);
- /*
- * Invalidation is a hint that the page is no longer
- * of interest and try to speed up its reclaim.
- */
- if (!ret)
- deactivate_file_page(page);
- count += ret;
+ for_each_pagecache_entry(&iter, mapping, start, end, page, index) {
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
}
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- cond_resched();
- index++;
+
+ if (!trylock_page(page))
+ continue;
+ WARN_ON(page->index != index);
+ ret = invalidate_inode_page(page);
+ unlock_page(page);
+ /*
+ * Invalidation is a hint that the page is no longer
+ * of interest and try to speed up its reclaim.
+ */
+ if (!ret)
+ deactivate_file_page(page);
+ count += ret;
}
+
return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
@@ -566,75 +511,59 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
- pgoff_t indices[PAGEVEC_SIZE];
- struct pagevec pvec;
+ struct pagecache_iter iter;
+ struct page *page;
pgoff_t index;
- int i;
int ret = 0;
int ret2 = 0;
int did_range_unmap = 0;
cleancache_invalidate_inode(mapping);
- pagevec_init(&pvec, 0);
- index = start;
- while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
- indices)) {
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
-
- /* We rely upon deletion not changing page->index */
- index = indices[i];
- if (index > end)
- break;
- if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
- continue;
- }
+ for_each_pagecache_entry(&iter, mapping, start, end, page, index) {
+ if (radix_tree_exceptional_entry(page)) {
+ clear_exceptional_entry(mapping, index, page);
+ continue;
+ }
- lock_page(page);
- WARN_ON(page->index != index);
- if (page->mapping != mapping) {
- unlock_page(page);
- continue;
- }
- wait_on_page_writeback(page);
- if (page_mapped(page)) {
- if (!did_range_unmap) {
- /*
- * Zap the rest of the file in one hit.
- */
- unmap_mapping_range(mapping,
- (loff_t)index << PAGE_SHIFT,
- (loff_t)(1 + end - index)
- << PAGE_SHIFT,
- 0);
- did_range_unmap = 1;
- } else {
- /*
- * Just zap this page
- */
- unmap_mapping_range(mapping,
- (loff_t)index << PAGE_SHIFT,
- PAGE_SIZE, 0);
- }
- }
- BUG_ON(page_mapped(page));
- ret2 = do_launder_page(mapping, page);
- if (ret2 == 0) {
- if (!invalidate_complete_page2(mapping, page))
- ret2 = -EBUSY;
- }
- if (ret2 < 0)
- ret = ret2;
+ lock_page(page);
+ WARN_ON(page->index != index);
+ if (page->mapping != mapping) {
unlock_page(page);
+ continue;
}
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- cond_resched();
- index++;
+ wait_on_page_writeback(page);
+ if (page_mapped(page)) {
+ if (!did_range_unmap) {
+ /*
+ * Zap the rest of the file in one hit.
+ */
+ unmap_mapping_range(mapping,
+ (loff_t)index << PAGE_SHIFT,
+ (loff_t)(1 + end - index)
+ << PAGE_SHIFT,
+ 0);
+ did_range_unmap = 1;
+ } else {
+ /*
+ * Just zap this page
+ */
+ unmap_mapping_range(mapping,
+ (loff_t)index << PAGE_SHIFT,
+ PAGE_SIZE, 0);
+ }
+ }
+ BUG_ON(page_mapped(page));
+ ret2 = do_launder_page(mapping, page);
+ if (ret2 == 0) {
+ if (!invalidate_complete_page2(mapping, page))
+ ret2 = -EBUSY;
+ }
+ if (ret2 < 0)
+ ret = ret2;
+ unlock_page(page);
}
+
cleancache_invalidate_inode(mapping);
return ret;
}