path: root/mm/page_isolation.c
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-13 15:39:10 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-13 15:39:10 -0400
commit	0980bd9cd32de2fef7eaa2858345c49d14498625 (patch)
tree	41f5f823d0569a81b22037e79c22d823933a63f1 /mm/page_isolation.c
parent	78821b2c0299ab807d483802f09897728b93bce0 (diff)
parent	0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff)
Merge commit 'v3.6-rc1' into linux-next
* commit 'v3.6-rc1': (9532 commits)
  Linux 3.6-rc1
  mm: remove node_start_pfn checking in new WARN_ON for now
  ARM: mmp: add missing irqs.h
  arm: mvebu: fix typo in .dtsi comment for Armada XP SoCs
  ARM: PRIMA2: delete redundant codes to restore LATCHED when timer resumes
  libceph: fix crypto key null deref, memory leak
  ceph: simplify+fix atomic_open
  sh: explicitly include sh_dma.h in setup-sh7722.c
  um: Add arch/x86/um to MAINTAINERS
  um: pass siginfo to guest process
  um: fix ubd_file_size for read-only files
  md/dm-raid: DM_RAID should select MD_RAID10
  md/raid1: submit IO from originating thread instead of md thread.
  raid5: raid5d handle stripe in batch way
  raid5: make_request use batch stripe release
  um: pull interrupt_end() into userspace()
  um: split syscall_trace(), pass pt_regs to it
  um: switch UPT_SET_RETURN_VALUE and regs_return_value to pt_regs
  MIPS: Loongson 2: Sort out clock managment.
  locks: remove unused lm_release_private
  ...
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	93
1 files changed, 93 insertions, 0 deletions
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b8..247d1f175739 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,101 @@
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
+#include <linux/memory.h>
#include "internal.h"
+/* called while holding zone->lock */
+static void set_pageblock_isolate(struct page *page)
+{
+ if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
+ return;
+
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+ page_zone(page)->nr_pageblock_isolate++;
+}
+
+/* called while holding zone->lock */
+static void restore_pageblock_isolate(struct page *page, int migratetype)
+{
+ struct zone *zone = page_zone(page);
+ if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
+ return;
+
+ BUG_ON(zone->nr_pageblock_isolate <= 0);
+ set_pageblock_migratetype(page, migratetype);
+ zone->nr_pageblock_isolate--;
+}
+
+int set_migratetype_isolate(struct page *page)
+{
+ struct zone *zone;
+ unsigned long flags, pfn;
+ struct memory_isolate_notify arg;
+ int notifier_ret;
+ int ret = -EBUSY;
+
+ zone = page_zone(page);
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ pfn = page_to_pfn(page);
+ arg.start_pfn = pfn;
+ arg.nr_pages = pageblock_nr_pages;
+ arg.pages_found = 0;
+
+ /*
+ * It may be possible to isolate a pageblock even if the
+ * migratetype is not MIGRATE_MOVABLE. The memory isolation
+ * notifier chain is used by balloon drivers to return the
+ * number of pages in a range that are held by the balloon
+ * driver to shrink memory. If all the pages are accounted for
+ * by balloons, are free, or on the LRU, isolation can continue.
+ * Later, for example, when the memory hotplug notifier runs, these
+ * pages reported as "can be isolated" should be isolated (freed)
+ * by the balloon driver through the memory notifier chain.
+ */
+ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+ notifier_ret = notifier_to_errno(notifier_ret);
+ if (notifier_ret)
+ goto out;
+ /*
+ * FIXME: memory hotplug doesn't call shrink_slab() by itself yet,
+ * so for now we only check MOVABLE pages.
+ */
+ if (!has_unmovable_pages(zone, page, arg.pages_found))
+ ret = 0;
+
+ /*
+ * "Immobile" means not-on-LRU pages. If the number of immobile pages
+ * is larger than the removable-by-driver pages reported by the
+ * notifier, we'll fail.
+ */
+
+out:
+ if (!ret) {
+ set_pageblock_isolate(page);
+ move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ }
+
+ spin_unlock_irqrestore(&zone->lock, flags);
+ if (!ret)
+ drain_all_pages();
+ return ret;
+}
+
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+{
+ struct zone *zone;
+ unsigned long flags;
+ zone = page_zone(page);
+ spin_lock_irqsave(&zone->lock, flags);
+ if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ goto out;
+ move_freepages_block(zone, page, migratetype);
+ restore_pageblock_isolate(page, migratetype);
+out:
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{