Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  114
1 file changed, 60 insertions(+), 54 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 3f9afded581b..681da61080bc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
+#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
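
The new <linux/sched/signal.h> include reflects the 4.11 sched.h header split: signal_pending() and friends moved out of <linux/sched.h>, so files using them in wait loops now pull the header in explicitly. The dependent pattern, condensed from the killable page-wait path further down (not a verbatim quote):

	#include <linux/sched/signal.h>	/* signal_pending_state() */

	/* Inside the wait loop: bail out of an interruptible or
	 * killable wait as soon as a signal is pending. */
	if (unlikely(signal_pending_state(state, current))) {
		ret = -EINTR;
		break;
	}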
@@ -518,7 +519,7 @@ EXPORT_SYMBOL(filemap_write_and_wait);
*
* Write out and wait upon file offsets lstart->lend, inclusive.
*
- * Note that `lend' is inclusive (describes the last byte to be written) so
+ * Note that @lend is inclusive (describes the last byte to be written) so
* that this function can be used to write to the very end-of-file (end = -1).
*/
int filemap_write_and_wait_range(struct address_space *mapping,
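
Since @lend is inclusive, a caller that wants to flush an entire file passes -1 as the end offset. A minimal sketch of such a caller (helper name hypothetical):

	/* Write back and wait on every byte of the file; lend = -1 is
	 * the documented end-of-file value. */
	static int flush_whole_file(struct address_space *mapping)
	{
		return filemap_write_and_wait_range(mapping, 0, -1);
	}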
@@ -788,7 +789,7 @@ static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void
return autoremove_wake_function(wait, mode, sync, key);
}
-void wake_up_page_bit(struct page *page, int bit_nr)
+static void wake_up_page_bit(struct page *page, int bit_nr)
{
wait_queue_head_t *q = page_waitqueue(page);
struct wait_page_key key;
@@ -821,7 +822,13 @@ void wake_up_page_bit(struct page *page, int bit_nr)
}
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(wake_up_page_bit);
+
+static void wake_up_page(struct page *page, int bit)
+{
+ if (!PageWaiters(page))
+ return;
+ wake_up_page_bit(page, bit);
+}
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
struct page *page, int bit_nr, int state, bool lock)
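
The new static wake_up_page() is the fast path of the PG_waiters scheme: the cheap page-flag test lets the common no-waiter case skip page_waitqueue()'s hashed-waitqueue lookup and its irq-safe spinlock entirely. The same test-before-wake shape in generic form (all names hypothetical; waiters must set the flag before sleeping, or wakeups can be missed):

	static void wake_up_object(struct my_object *obj)
	{
		/* Fast path: nobody advertised themselves as waiting. */
		if (!test_bit(MY_OBJ_WAITERS, &obj->flags))
			return;
		/* Slow path: take the queue lock and walk the waiters. */
		wake_up(&obj->wq);
	}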
@@ -1002,9 +1009,12 @@ void page_endio(struct page *page, bool is_write, int err)
unlock_page(page);
} else {
if (err) {
+ struct address_space *mapping;
+
SetPageError(page);
- if (page->mapping)
- mapping_set_error(page->mapping, err);
+ mapping = page_mapping(page);
+ if (mapping)
+ mapping_set_error(mapping, err);
}
end_page_writeback(page);
}
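
Using page_mapping() instead of dereferencing page->mapping directly is the safer lookup here: page_mapping() decodes the tag bits that ->mapping can carry and resolves swap-cache pages to their swap address_space. For context, page_endio() is typically called from a bio completion handler, along these lines (modeled on fs/mpage.c of this vintage):

	static void my_end_io(struct bio *bio)
	{
		struct bio_vec *bv;
		int i;

		bio_for_each_segment_all(bv, bio, i)
			page_endio(bv->bv_page, op_is_write(bio_op(bio)),
				   bio->bi_error);
		bio_put(bio);
	}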
@@ -1013,7 +1023,7 @@ EXPORT_SYMBOL_GPL(page_endio);
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @page: the page to lock
+ * @__page: the page to lock
*/
void __lock_page(struct page *__page)
{
@@ -1267,12 +1277,14 @@ EXPORT_SYMBOL(find_lock_entry);
*
* PCG flags modify how the page is returned.
*
- * FGP_ACCESSED: the page will be marked accessed
- * FGP_LOCK: Page is return locked
- * FGP_CREAT: If page is not present then a new page is allocated using
- * @gfp_mask and added to the page cache and the VM's LRU
- * list. The page is returned locked and with an increased
- * refcount. Otherwise, %NULL is returned.
+ * @fgp_flags can be:
+ *
+ * - FGP_ACCESSED: the page will be marked accessed
+ * - FGP_LOCK: Page is returned locked
+ * - FGP_CREAT: If page is not present then a new page is allocated using
+ * @gfp_mask and added to the page cache and the VM's LRU
+ * list. The page is returned locked and with an increased
+ * refcount. Otherwise, NULL is returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
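
A sketch of a find-or-create caller using these flags (not from this patch): with FGP_LOCK | FGP_CREAT the page comes back locked and with a reference held, so the caller owns both.

	struct page *page;

	page = pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
	if (!page)
		return -ENOMEM;		/* FGP_CREAT allocation failed */
	/* ... operate on the locked, referenced page ... */
	unlock_page(page);
	put_page(page);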
@@ -2023,7 +2035,6 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- struct iov_iter data = *iter;
loff_t size;
size = i_size_read(inode);
@@ -2034,11 +2045,12 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
file_accessed(file);
- retval = mapping->a_ops->direct_IO(iocb, &data);
+ retval = mapping->a_ops->direct_IO(iocb, iter);
if (retval >= 0) {
iocb->ki_pos += retval;
- iov_iter_advance(iter, retval);
+ count -= retval;
}
+ iov_iter_revert(iter, count - iov_iter_count(iter));
/*
* Btrfs can have a short DIO read if we encounter
@@ -2049,7 +2061,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* the rest of the read. Buffered reads will not work for
* DAX files, so don't bother trying.
*/
- if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
+ if (retval < 0 || !count || iocb->ki_pos >= size ||
IS_DAX(inode))
goto out;
}
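
The direct-IO read path now hands the caller's iov_iter straight to ->direct_IO() instead of working on a stack copy, then uses iov_iter_revert() to walk the iterator back if ->direct_IO() advanced it further than the bytes it actually completed. The bookkeeping in isolation (count is captured from iov_iter_count(iter) before the call, in the function's preamble):

	count = iov_iter_count(iter);		/* bytes requested */
	retval = mapping->a_ops->direct_IO(iocb, iter);
	if (retval >= 0)
		count -= retval;		/* bytes still outstanding */
	/*
	 * ->direct_IO() may have consumed more of the iterator than
	 * it completed; rewind so that exactly `retval' bytes of
	 * @iter appear consumed.
	 */
	iov_iter_revert(iter, count - iov_iter_count(iter));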
@@ -2163,7 +2175,6 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
/**
* filemap_fault - read in file data for page fault handling
- * @vma: vma in which the fault was taken
* @vmf: struct vm_fault containing details of the fault
*
* filemap_fault() is invoked via the vma operations vector for a
@@ -2185,20 +2196,20 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
*
* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
*/
-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
{
int error;
- struct file *file = vma->vm_file;
+ struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
pgoff_t offset = vmf->pgoff;
+ pgoff_t max_off;
struct page *page;
- loff_t size;
int ret = 0;
- size = round_up(i_size_read(inode), PAGE_SIZE);
- if (offset >= size >> PAGE_SHIFT)
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ if (unlikely(offset >= max_off))
return VM_FAULT_SIGBUS;
/*
@@ -2210,12 +2221,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* We found the page, so try async readahead before
* waiting for the lock.
*/
- do_async_mmap_readahead(vma, ra, file, page, offset);
+ do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
} else if (!page) {
/* No page in the page cache at all */
- do_sync_mmap_readahead(vma, ra, file, offset);
+ do_sync_mmap_readahead(vmf->vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
@@ -2223,7 +2234,7 @@ retry_find:
goto no_cached_page;
}
- if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+ if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
put_page(page);
return ret | VM_FAULT_RETRY;
}
@@ -2247,8 +2258,8 @@ retry_find:
* Found the page and have a reference on it.
* We must recheck i_size under page lock.
*/
- size = round_up(i_size_read(inode), PAGE_SIZE);
- if (unlikely(offset >= size >> PAGE_SHIFT)) {
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ if (unlikely(offset >= max_off)) {
unlock_page(page);
put_page(page);
return VM_FAULT_SIGBUS;
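
Both EOF checks now compute the first invalid page offset directly with DIV_ROUND_UP() instead of rounding the byte size up and shifting; the result is the same, but the intent (the number of pages backing i_size bytes) is stated directly and lands in a pgoff_t. A user-space spot check of the arithmetic (PAGE_SIZE assumed 4096):

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	assert(DIV_ROUND_UP(1ULL,    4096) == 1);  /* 1-byte file: only page 0 valid */
	assert(DIV_ROUND_UP(4096ULL, 4096) == 1);  /* exactly one page */
	assert(DIV_ROUND_UP(4097ULL, 4096) == 2);  /* spills into page 1 */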
@@ -2314,7 +2325,7 @@ void filemap_map_pages(struct vm_fault *vmf,
struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
pgoff_t last_pgoff = start_pgoff;
- loff_t size;
+ unsigned long max_idx;
struct page *head, *page;
rcu_read_lock();
@@ -2360,8 +2371,8 @@ repeat:
if (page->mapping != mapping || !PageUptodate(page))
goto unlock;
- size = round_up(i_size_read(mapping->host), PAGE_SIZE);
- if (page->index >= size >> PAGE_SHIFT)
+ max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+ if (page->index >= max_idx)
goto unlock;
if (file->f_ra.mmap_miss > 0)
@@ -2390,14 +2401,14 @@ next:
}
EXPORT_SYMBOL(filemap_map_pages);
-int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ file_update_time(vmf->vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
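
filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() all move to the single-argument prototype, recovering the vma from vmf->vma. Users wire them up exactly as before; a sketch of the standard wiring (this mirrors what generic_file_vm_ops looks like after the change):

	static const struct vm_operations_struct my_file_vm_ops = {
		.fault		= filemap_fault,	/* now int (*)(struct vm_fault *) */
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= filemap_page_mkwrite,
	};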
@@ -2695,7 +2706,6 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t written;
size_t write_len;
pgoff_t end;
- struct iov_iter data;
write_len = iov_iter_count(from);
end = (pos + write_len - 1) >> PAGE_SHIFT;
@@ -2710,22 +2720,19 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
* about to write. We do this *before* the write so that we can return
* without clobbering -EIOCBQUEUED from ->direct_IO().
*/
- if (mapping->nrpages) {
- written = invalidate_inode_pages2_range(mapping,
+ written = invalidate_inode_pages2_range(mapping,
pos >> PAGE_SHIFT, end);
- /*
- * If a page can not be invalidated, return 0 to fall back
- * to buffered write.
- */
- if (written) {
- if (written == -EBUSY)
- return 0;
- goto out;
- }
+ /*
+ * If a page cannot be invalidated, return 0 to fall back
+ * to buffered write.
+ */
+ if (written) {
+ if (written == -EBUSY)
+ return 0;
+ goto out;
}
- data = *from;
- written = mapping->a_ops->direct_IO(iocb, &data);
+ written = mapping->a_ops->direct_IO(iocb, from);
/*
* Finally, try again to invalidate clean pages which might have been
@@ -2735,20 +2742,19 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
* so we don't support it 100%. If this invalidation
* fails, tough, the write still worked...
*/
- if (mapping->nrpages) {
- invalidate_inode_pages2_range(mapping,
- pos >> PAGE_SHIFT, end);
- }
+ invalidate_inode_pages2_range(mapping,
+ pos >> PAGE_SHIFT, end);
if (written > 0) {
pos += written;
- iov_iter_advance(from, written);
+ write_len -= written;
if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
iocb->ki_pos = pos;
}
+ iov_iter_revert(from, write_len - iov_iter_count(from));
out:
return written;
}
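
The write side gets the same iov_iter treatment as the read side: ->direct_IO() operates on @from itself, and a final iov_iter_revert() reconciles the iterator with the bytes actually written. Tracing the arithmetic (write_len starts as iov_iter_count(from)):

	write_len = iov_iter_count(from);	/* bytes we asked to write */
	written = mapping->a_ops->direct_IO(iocb, from);
	if (written > 0)
		write_len -= written;		/* bytes left unwritten */
	/*
	 * @from may have been advanced past the completed bytes (or an
	 * error may have left it partially consumed); after the revert,
	 * exactly `written' bytes appear consumed on success.
	 */
	iov_iter_revert(from, write_len - iov_iter_count(from));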
@@ -2992,7 +2998,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
* @gfp_mask: memory allocation flags (and I/O mode)
*
* The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
+ * (presumably at page->private). If the release was successful, return '1'.
* Otherwise return zero.
*
* This may also be called if PG_fscache is set on a page, indicating that the