author    Kent Overstreet <kent.overstreet@gmail.com>  2016-03-28 19:38:20 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>  2020-05-06 17:14:14 -0400
commit    2040ab403ba11900f19596fa16427624e5b8721d
tree      e7990c2237a33cbe812039ee3c7ed67d57e4fa01
parent    b3785d8f7feca002a2ebaf287acecb27c34fc61a
mm: pagecache add lock
Add a per-address-space lock around adding pages to the pagecache, making it possible for fallocate INSERT_RANGE/COLLAPSE_RANGE to work correctly, and also hopefully making truncate and dio a bit saner.
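The code below relies on a struct pagecache_lock and a current->pagecache_lock pointer that the full patch adds in headers outside mm/filemap.c. A minimal sketch of the assumed definitions, inferred from how the diff uses them (not the verbatim header):

#include <linux/atomic.h>
#include <linux/wait.h>

/*
 * Assumed shape of the lock (the real definition lives elsewhere in
 * the patch): v > 0 while add-side holders (page adders) have it,
 * v < 0 while block-side holders (truncate, fallocate, dio) have it.
 */
struct pagecache_lock {
	atomic_long_t		v;
	wait_queue_head_t	wait;
};

static inline void pagecache_lock_init(struct pagecache_lock *lock)
{
	atomic_long_set(&lock->v, 0);
	init_waitqueue_head(&lock->wait);
}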
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  99
1 file changed, 89 insertions(+), 10 deletions(-)
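How the two modes are meant to be used, as a sketch: pagecache_add_get()/pagecache_add_put() are taken (shared) around every page-cache insertion, while pagecache_block_get()/pagecache_block_put() exclude all insertions for the duration of an operation such as INSERT_RANGE. The caller below is hypothetical; only mapping->add_lock and the lock functions come from the patch:

/*
 * Hypothetical fallocate-style caller: keep new pages out of the
 * cache while a range is being shifted.
 */
static int example_collapse_range(struct address_space *mapping)
{
	/*
	 * Excludes pagecache_add_get() callers; also records the lock in
	 * current->pagecache_lock so this thread's own faults (e.g. via
	 * get_user_pages()) don't deadlock against it.
	 */
	pagecache_block_get(&mapping->add_lock);

	/* ... truncate and shift pages here; concurrent adds now wait ... */

	pagecache_block_put(&mapping->add_lock);	/* wakes waiting adders */
	return 0;
}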
diff --git a/mm/filemap.c b/mm/filemap.c
index 45f1c6d73b5b..b147e55a8d67 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -111,6 +111,73 @@
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
+static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
+{
+ BUG_ON(atomic_long_read(&lock->v) == 0);
+
+ if (atomic_long_sub_return_release(i, &lock->v) == 0)
+ wake_up_all(&lock->wait);
+}
+
+static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
+{
+ long v = atomic_long_read(&lock->v), old;
+
+ do {
+ old = v;
+
+ if (i > 0 ? v < 0 : v > 0)
+ return false;
+ } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
+ old, old + i)) != old);
+ return true;
+}
+
+static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
+{
+ wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
+}
+
+void pagecache_add_put(struct pagecache_lock *lock)
+{
+ __pagecache_lock_put(lock, 1);
+}
+EXPORT_SYMBOL(pagecache_add_put);
+
+void pagecache_add_get(struct pagecache_lock *lock)
+{
+ __pagecache_lock_get(lock, 1);
+}
+EXPORT_SYMBOL(pagecache_add_get);
+
+void __pagecache_block_put(struct pagecache_lock *lock)
+{
+ __pagecache_lock_put(lock, -1);
+}
+EXPORT_SYMBOL(__pagecache_block_put);
+
+void __pagecache_block_get(struct pagecache_lock *lock)
+{
+ __pagecache_lock_get(lock, -1);
+}
+EXPORT_SYMBOL(__pagecache_block_get);
+
+void pagecache_block_put(struct pagecache_lock *lock)
+{
+ BUG_ON(current->pagecache_lock != lock);
+ current->pagecache_lock = NULL;
+ __pagecache_lock_put(lock, -1);
+}
+EXPORT_SYMBOL(pagecache_block_put);
+
+void pagecache_block_get(struct pagecache_lock *lock)
+{
+ __pagecache_lock_get(lock, -1);
+ BUG_ON(current->pagecache_lock);
+ current->pagecache_lock = lock;
+}
+EXPORT_SYMBOL(pagecache_block_get);
+
static int page_cache_tree_insert(struct address_space *mapping,
struct page *page, void **shadowp)
{
@@ -858,19 +925,19 @@ static int __add_to_page_cache_locked(struct page *page,
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+ if (current->pagecache_lock != &mapping->add_lock)
+ pagecache_add_get(&mapping->add_lock);
+
if (!huge) {
error = mem_cgroup_try_charge(page, current->mm,
gfp_mask, &memcg, false);
if (error)
- return error;
+ goto err;
}
error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
- if (error) {
- if (!huge)
- mem_cgroup_cancel_charge(page, memcg, false);
- return error;
- }
+ if (error)
+ goto err_uncharge;
get_page(page);
page->mapping = mapping;
@@ -889,15 +956,20 @@ static int __add_to_page_cache_locked(struct page *page,
if (!huge)
mem_cgroup_commit_charge(page, memcg, false, false);
trace_mm_filemap_add_to_page_cache(page);
- return 0;
+err:
+ if (current->pagecache_lock != &mapping->add_lock)
+ pagecache_add_put(&mapping->add_lock);
+
+ return error;
err_insert:
page->mapping = NULL;
/* Leave page->index set: truncation relies upon it */
xa_unlock_irq(&mapping->i_pages);
+ put_page(page);
+err_uncharge:
if (!huge)
mem_cgroup_cancel_charge(page, memcg, false);
- put_page(page);
- return error;
+ goto err;
}
/**
@@ -2532,7 +2604,14 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* Do we have something in the page cache already?
*/
page = find_get_page(mapping, offset);
- if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+ if (unlikely(current->pagecache_lock == &mapping->add_lock)) {
+ /*
+ * fault from e.g. dio -> get_user_pages() - _don't_ want to do
+	 * readahead, only read in the page we need:
+ */
+ if (!page)
+ goto no_cached_page;
+ } else if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
/*
* We found the page, so try async readahead before
* waiting for the lock.