-rw-r--r--   include/linux/fs.h       |  6
-rw-r--r--   include/linux/migrate.h  |  6
-rw-r--r--   mm/migrate.c             | 70
3 files changed, 42 insertions, 40 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c823a3815e24..e917403f4d58 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -376,7 +376,8 @@ struct address_space_operations {
struct page* (*get_xip_page)(struct address_space *, sector_t,
int);
/* migrate the contents of a page to the specified target */
- int (*migratepage) (struct page *, struct page *);
+ int (*migratepage) (struct address_space *,
+ struct page *, struct page *);
};
struct backing_dev_info;
@@ -1772,7 +1773,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct page *, struct page *);
+extern int buffer_migrate_page(struct address_space *,
+ struct page *, struct page *);
#else
#define buffer_migrate_page NULL
#endif
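With the widened hook, an implementation no longer has to rediscover the mapping via page_mapping(); the core passes it in. Below is a minimal sketch of how a filesystem might plug into the new signature; foo_migratepage and foo_aops are hypothetical names used only for illustration, not part of this patch:

static int foo_migratepage(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	/* Nothing filesystem-private to carry over: defer to the generic helper. */
	return migrate_page(mapping, newpage, page);
}

static struct address_space_operations foo_aops = {
	.migratepage	= foo_migratepage,
};

Filesystems that keep buffer heads attached would point .migratepage at buffer_migrate_page instead, which now takes the same three arguments.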
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e8d3b08cc354..287c47b5e5df 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,12 +6,14 @@
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
+extern int migrate_page(struct address_space *,
+ struct page *, struct page *);
extern int migrate_pages(struct list_head *l, struct list_head *t,
struct list_head *moved, struct list_head *failed);
extern int migrate_pages_to(struct list_head *pagelist,
struct vm_area_struct *vma, int dest);
-extern int fail_migrate_page(struct page *, struct page *);
+extern int fail_migrate_page(struct address_space *,
+ struct page *, struct page *);
extern int migrate_prep(void);
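The stub for non-movable mappings changes in the same way. A mapping whose pages must never migrate can still wire in fail_migrate_page directly; the baz_aops table below is only a hypothetical illustration of that wiring:

static struct address_space_operations baz_aops = {
	.migratepage	= fail_migrate_page,	/* always fails with -EIO */
};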
diff --git a/mm/migrate.c b/mm/migrate.c
index 8095c607a494..f65e69d94527 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -173,15 +173,11 @@ retry:
* 2 for pages with a mapping
* 3 for pages with a mapping and PagePrivate set.
*/
-static int migrate_page_move_mapping(struct page *newpage,
- struct page *page)
+static int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
- struct address_space *mapping = page_mapping(page);
struct page **radix_pointer;
- if (!mapping)
- return -EAGAIN;
-
write_lock_irq(&mapping->tree_lock);
radix_pointer = (struct page **)radix_tree_lookup_slot(
@@ -197,15 +193,8 @@ static int migrate_page_move_mapping(struct page *newpage,
/*
* Now we know that no one else is looking at the page.
- *
- * Certain minimal information about a page must be available
- * in order for other subsystems to properly handle the page if they
- * find it through the radix tree update before we are finished
- * copying the page.
*/
get_page(newpage);
- newpage->index = page->index;
- newpage->mapping = page->mapping;
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
@@ -262,7 +251,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
***********************************************************/
/* Always fail migration. Used for mappings that are not movable */
-int fail_migrate_page(struct page *newpage, struct page *page)
+int fail_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
return -EIO;
}
@@ -274,13 +264,14 @@ EXPORT_SYMBOL(fail_migrate_page);
*
* Pages are locked upon entry and exit.
*/
-int migrate_page(struct page *newpage, struct page *page)
+int migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(newpage, page);
+ rc = migrate_page_move_mapping(mapping, newpage, page);
if (rc)
return rc;
@@ -305,21 +296,18 @@ EXPORT_SYMBOL(migrate_page);
* if the underlying filesystem guarantees that no other references to "page"
* exist.
*/
-int buffer_migrate_page(struct page *newpage, struct page *page)
+int buffer_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
- struct address_space *mapping = page->mapping;
struct buffer_head *bh, *head;
int rc;
- if (!mapping)
- return -EAGAIN;
-
if (!page_has_buffers(page))
- return migrate_page(newpage, page);
+ return migrate_page(mapping, newpage, page);
head = page_buffers(page);
- rc = migrate_page_move_mapping(newpage, page);
+ rc = migrate_page_move_mapping(mapping, newpage, page);
if (rc)
return rc;
@@ -448,9 +436,6 @@ redo:
goto next;
}
- newpage = lru_to_page(to);
- lock_page(newpage);
-
/*
* Establish swap ptes for anonymous pages or destroy pte
* maps for files.
@@ -473,11 +458,18 @@ redo:
rc = -EPERM;
if (try_to_unmap(page, 1) == SWAP_FAIL)
/* A vma has VM_LOCKED set -> permanent failure */
- goto unlock_both;
+ goto unlock_page;
rc = -EAGAIN;
if (page_mapped(page))
- goto unlock_both;
+ goto unlock_page;
+
+ newpage = lru_to_page(to);
+ lock_page(newpage);
+ /* Prepare mapping for the new page. */
+ newpage->index = page->index;
+ newpage->mapping = page->mapping;
+
/*
* Pages are properly locked and writeback is complete.
* Try to migrate the page.
@@ -494,7 +486,8 @@ redo:
* own migration function. This is the most common
* path for page migration.
*/
- rc = mapping->a_ops->migratepage(newpage, page);
+ rc = mapping->a_ops->migratepage(mapping,
+ newpage, page);
goto unlock_both;
}
@@ -524,7 +517,7 @@ redo:
*/
if (!page_has_buffers(page) ||
try_to_release_page(page, GFP_KERNEL)) {
- rc = migrate_page(newpage, page);
+ rc = migrate_page(mapping, newpage, page);
goto unlock_both;
}
@@ -553,12 +546,17 @@ unlock_page:
unlock_page(page);
next:
- if (rc == -EAGAIN) {
- retry++;
- } else if (rc) {
- /* Permanent failure */
- list_move(&page->lru, failed);
- nr_failed++;
+ if (rc) {
+ if (newpage)
+ newpage->mapping = NULL;
+
+ if (rc == -EAGAIN)
+ retry++;
+ else {
+ /* Permanent failure */
+ list_move(&page->lru, failed);
+ nr_failed++;
+ }
} else {
if (newpage) {
/* Successful migration. Return page to LRU */
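Taken together, the mm/migrate.c changes resolve the mapping once in the caller and thread it through every migration path, preparing newpage's index and mapping just before the attempt and clearing the mapping again on failure. The following is a simplified, hypothetical sketch of that dispatch order (locking, writeback checks and the buffer-head fallback are omitted; move_to_new_page_sketch is not a function in this patch):

static int move_to_new_page_sketch(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		/* No mapping to migrate through: treated as a transient failure. */
		return -EAGAIN;

	/* Prepare the target page before handing it to the method. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;

	if (mapping->a_ops->migratepage)
		/* Filesystem-specific method now receives the mapping. */
		return mapping->a_ops->migratepage(mapping, newpage, page);

	/* Plain page-cache pages use the generic helper. */
	return migrate_page(mapping, newpage, page);
}

On failure the real code also resets newpage->mapping to NULL, as the final hunk above shows, so the unused target page is never left pointing at a mapping.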