From c654345924f7cce87bb221b89db91cba890421ba Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 8 Oct 2012 16:28:21 -0700 Subject: mm: remove __GFP_NO_KSWAPD When transparent huge pages were introduced, memory compaction and swap storms were an issue, and the kernel had to be careful to not make THP allocations cause pageout or compaction. Now that we have working compaction deferral, kswapd is smart enough to invoke compaction and the quadratic behaviour around isolate_free_pages has been fixed, it should be safe to remove __GFP_NO_KSWAPD. [minchan@kernel.org: Comment fix] [mgorman@suse.de: Avoid direct reclaim for deferred compaction] Cc: Andrea Arcangeli Signed-off-by: Rik van Riel Signed-off-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c13ea7538891..5e92698e5395 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2362,9 +2362,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, goto nopage; restart: - if (!(gfp_mask & __GFP_NO_KSWAPD)) - wake_all_kswapd(order, zonelist, high_zoneidx, - zone_idx(preferred_zone)); + wake_all_kswapd(order, zonelist, high_zoneidx, + zone_idx(preferred_zone)); /* * OK, we're below the kswapd watermark and have kicked background @@ -2441,7 +2440,7 @@ rebalance: * system then fail the allocation instead of entering direct reclaim. */ if ((deferred_compaction || contended_compaction) && - (gfp_mask & __GFP_NO_KSWAPD)) + (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE) goto nopage; /* Try direct reclaim and then allocating */ -- cgit v1.2.3 From 1fb3f8ca0e9222535a39b884cb67a34628411b9f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 8 Oct 2012 16:29:12 -0700 Subject: mm: compaction: capture a suitable high-order page immediately when it is made available While compaction is migrating pages to free up large contiguous blocks for allocation it races with other allocation requests that may steal these blocks or break them up. This patch alters direct compaction to capture a suitable free page as soon as it becomes available to reduce this race. It uses similar logic to split_free_page() to ensure that watermarks are still obeyed. 
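As an illustration (not part of the patch), the capture path added below follows a classic check/trylock/re-check shape: peek at the free lists without zone->lock, take the lock only via trylock so a contended compactor backs off, and re-check under the lock before taking the page. A minimal userspace C sketch of that pattern; the names (node, try_capture) are invented, not kernel API:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static struct node *freelist;	/* shared free list, normally lock-protected */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *try_capture(void)
{
	struct node *n = NULL;

	if (!freelist)			/* speculative, unlocked emptiness test */
		return NULL;

	if (pthread_mutex_trylock(&list_lock))
		return NULL;		/* lock contended: give up, as the patch does */

	if (freelist) {			/* re-check now that the lock is held */
		n = freelist;
		freelist = n->next;
	}
	pthread_mutex_unlock(&list_lock);
	return n;
}

int main(void)
{
	struct node a = { NULL };

	freelist = &a;
	printf("captured %p\n", (void *)try_capture());
	return 0;
}

The unlocked test is only a hint and may race; correctness comes from the re-check while holding the lock, which is why compact_capture_page() below re-tests list_empty() after compact_trylock_irqsave() succeeds.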
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Reviewed-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 4 +-- include/linux/mm.h | 1 + mm/compaction.c | 90 ++++++++++++++++++++++++++++++++++++++++------ mm/internal.h | 1 + mm/page_alloc.c | 63 +++++++++++++++++++++++--------- 5 files changed, 130 insertions(+), 29 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index ef658147e4e8..0e38a1deeb23 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync, bool *contended); + bool sync, bool *contended, struct page **page); extern int compact_pgdat(pg_data_t *pgdat, int order); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended) + bool sync, bool *contended, struct page **page) { return COMPACT_CONTINUE; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 0514fe9d3c84..5ddb11b2b4bb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -442,6 +442,7 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); int split_free_page(struct page *page); +int capture_free_page(struct page *page, int alloc_order, int migratetype); /* * Compound pages have a destructor function. Provide a diff --git a/mm/compaction.c b/mm/compaction.c index 7168edc7592c..0fbc6b73a522 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -91,6 +91,60 @@ static inline bool compact_trylock_irqsave(spinlock_t *lock, return compact_checklock_irqsave(lock, flags, false, cc); } +static void compact_capture_page(struct compact_control *cc) +{ + unsigned long flags; + int mtype, mtype_low, mtype_high; + + if (!cc->page || *cc->page) + return; + + /* + * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP + * regardless of the migratetype of the freelist is is captured from. + * This is fine because the order for a high-order MIGRATE_MOVABLE + * allocation is typically at least a pageblock size and overall + * fragmentation is not impaired. Other allocation types must + * capture pages from their own migratelist because otherwise they + * could pollute other pageblocks like MIGRATE_MOVABLE with + * difficult to move pages and making fragmentation worse overall. 
+ */ + if (cc->migratetype == MIGRATE_MOVABLE) { + mtype_low = 0; + mtype_high = MIGRATE_PCPTYPES; + } else { + mtype_low = cc->migratetype; + mtype_high = cc->migratetype + 1; + } + + /* Speculatively examine the free lists without zone lock */ + for (mtype = mtype_low; mtype < mtype_high; mtype++) { + int order; + for (order = cc->order; order < MAX_ORDER; order++) { + struct page *page; + struct free_area *area; + area = &(cc->zone->free_area[order]); + if (list_empty(&area->free_list[mtype])) + continue; + + /* Take the lock and attempt capture of the page */ + if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc)) + return; + if (!list_empty(&area->free_list[mtype])) { + page = list_entry(area->free_list[mtype].next, + struct page, lru); + if (capture_free_page(page, cc->order, mtype)) { + spin_unlock_irqrestore(&cc->zone->lock, + flags); + *cc->page = page; + return; + } + } + spin_unlock_irqrestore(&cc->zone->lock, flags); + } + } +} + /* * Isolate free pages onto a private freelist. Caller must hold zone->lock. * If @strict is true, will abort returning 0 on any invalid PFNs or non-free @@ -645,7 +699,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, static int compact_finished(struct zone *zone, struct compact_control *cc) { - unsigned int order; unsigned long watermark; if (fatal_signal_pending(current)) @@ -688,14 +741,22 @@ static int compact_finished(struct zone *zone, return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free? */ - for (order = cc->order; order < MAX_ORDER; order++) { - /* Job done if page is free of the right migratetype */ - if (!list_empty(&zone->free_area[order].free_list[cc->migratetype])) - return COMPACT_PARTIAL; - - /* Job done if allocation would set block type */ - if (order >= pageblock_order && zone->free_area[order].nr_free) + if (cc->page) { + /* Was a suitable page captured? 
*/ + if (*cc->page) return COMPACT_PARTIAL; + } else { + unsigned int order; + for (order = cc->order; order < MAX_ORDER; order++) { + struct free_area *area = &zone->free_area[cc->order]; + /* Job done if page is free of the right migratetype */ + if (!list_empty(&area->free_list[cc->migratetype])) + return COMPACT_PARTIAL; + + /* Job done if allocation would set block type */ + if (cc->order >= pageblock_order && area->nr_free) + return COMPACT_PARTIAL; + } } return COMPACT_CONTINUE; @@ -817,6 +878,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) goto out; } } + + /* Capture a page now if it is a suitable size */ + compact_capture_page(cc); } out: @@ -829,7 +893,8 @@ out: static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync, bool *contended) + bool sync, bool *contended, + struct page **page) { struct compact_control cc = { .nr_freepages = 0, @@ -839,6 +904,7 @@ static unsigned long compact_zone_order(struct zone *zone, .zone = zone, .sync = sync, .contended = contended, + .page = page, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -860,7 +926,7 @@ int sysctl_extfrag_threshold = 500; */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended) + bool sync, bool *contended, struct page **page) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -881,7 +947,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, int status; status = compact_zone_order(zone, order, gfp_mask, sync, - contended); + contended, page); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -936,6 +1002,7 @@ int compact_pgdat(pg_data_t *pgdat, int order) struct compact_control cc = { .order = order, .sync = false, + .page = NULL, }; return __compact_pgdat(pgdat, &cc); @@ -946,6 +1013,7 @@ static int compact_node(int nid) struct compact_control cc = { .order = -1, .sync = true, + .page = NULL, }; return __compact_pgdat(NODE_DATA(nid), &cc); diff --git a/mm/internal.h b/mm/internal.h index b8c91b342e24..e549a7fbc296 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -131,6 +131,7 @@ struct compact_control { int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; bool *contended; /* True if a lock was contended */ + struct page **page; /* Page captured of requested size */ }; unsigned long diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5e92698e5395..cfd565dbe124 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1380,16 +1380,11 @@ void split_page(struct page *page, unsigned int order) } /* - * Similar to split_page except the page is already free. As this is only - * being used for migration, the migratetype of the block also changes. - * As this is called with interrupts disabled, the caller is responsible - * for calling arch_alloc_page() and kernel_map_page() after interrupts - * are enabled. - * - * Note: this is probably too low level an operation for use in drivers. - * Please consult with lkml before using this in your driver. 
+ * Similar to the split_page family of functions except that the page + * required at the given order and being isolated now to prevent races + * with parallel allocators */ -int split_free_page(struct page *page) +int capture_free_page(struct page *page, int alloc_order, int migratetype) { unsigned int order; unsigned long watermark; @@ -1411,10 +1406,11 @@ int split_free_page(struct page *page) rmv_page_order(page); __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); - /* Split into individual pages */ - set_page_refcounted(page); - split_page(page, order); + if (alloc_order != order) + expand(zone, page, alloc_order, order, + &zone->free_area[order], migratetype); + /* Set the pageblock if the captured page is at least a pageblock */ if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { @@ -1425,7 +1421,35 @@ int split_free_page(struct page *page) } } - return 1 << order; + return 1UL << order; +} + +/* + * Similar to split_page except the page is already free. As this is only + * being used for migration, the migratetype of the block also changes. + * As this is called with interrupts disabled, the caller is responsible + * for calling arch_alloc_page() and kernel_map_page() after interrupts + * are enabled. + * + * Note: this is probably too low level an operation for use in drivers. + * Please consult with lkml before using this in your driver. + */ +int split_free_page(struct page *page) +{ + unsigned int order; + int nr_pages; + + BUG_ON(!PageBuddy(page)); + order = page_order(page); + + nr_pages = capture_free_page(page, order, 0); + if (!nr_pages) + return 0; + + /* Split into individual pages */ + set_page_refcounted(page); + split_page(page, order); + return nr_pages; } /* @@ -2105,7 +2129,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { - struct page *page; + struct page *page = NULL; if (!order) return NULL; @@ -2118,10 +2142,16 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync_migration, - contended_compaction); + contended_compaction, &page); current->flags &= ~PF_MEMALLOC; - if (*did_some_progress != COMPACT_SKIPPED) { + /* If compaction captured a page, prep and use it */ + if (page) { + prep_new_page(page, order, gfp_mask); + goto got_page; + } + + if (*did_some_progress != COMPACT_SKIPPED) { /* Page migration frees to the PCP lists but we want merging */ drain_pages(get_cpu()); put_cpu(); @@ -2131,6 +2161,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, alloc_flags & ~ALLOC_NO_WATERMARKS, preferred_zone, migratetype); if (page) { +got_page: preferred_zone->compact_considered = 0; preferred_zone->compact_defer_shift = 0; if (order >= preferred_zone->compact_order_failed) -- cgit v1.2.3 From 02c6de8d757cb32c0829a45d81c3dfcbcafd998b Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:31:55 -0700 Subject: mm: cma: discard clean pages during contiguous allocation instead of migration Drop clean cache pages instead of migration during alloc_contig_range() to minimise allocation latency by reducing the amount of migration that is necessary. It's useful for CMA because latency of migration is more important than evicting the background process's working set. 
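As an illustration (not part of the patch), the mechanism is a one-pass list filter: detach pages that are clean file cache onto a private list and reclaim those directly, leaving only the pages that genuinely need migration. A minimal userspace C sketch; tpage and drop_clean_pages are invented stand-ins for struct page and reclaim_clean_pages_from_list():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct tpage {
	bool file_backed;	/* page_is_file_cache() in the kernel */
	bool dirty;		/* PageDirty() */
	struct tpage *next;
};

/* Drop clean file-backed pages straight away; keep the rest for migration. */
static unsigned long drop_clean_pages(struct tpage **list)
{
	unsigned long dropped = 0;
	struct tpage **pp = list;

	while (*pp) {
		struct tpage *p = *pp;

		if (p->file_backed && !p->dirty) {
			*pp = p->next;	/* unlink: reclaimed, not migrated */
			free(p);
			dropped++;
		} else {
			pp = &p->next;	/* dirty or anonymous: must migrate */
		}
	}
	return dropped;
}

int main(void)
{
	struct tpage *list = NULL;
	int i;

	for (i = 0; i < 4; i++) {
		struct tpage *p = malloc(sizeof(*p));

		p->file_backed = i % 2;	/* alternate file-backed/anonymous */
		p->dirty = false;
		p->next = list;
		list = p;
	}
	printf("dropped %lu clean pages\n", drop_clean_pages(&list));
	return 0;
}

Every page dropped this way is one fewer page to migrate and one fewer free target page that has to be found, which is where the latency win comes from.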
In addition, as pages are reclaimed then fewer free pages for migration targets are required so it avoids memory reclaiming to get free pages, which is a contributory factor to increased latency. I measured elapsed time of __alloc_contig_migrate_range() which migrates 10M in 40M movable zone in QEMU machine. Before - 146ms, After - 7ms [akpm@linux-foundation.org: fix nommu build] Signed-off-by: Mel Gorman Signed-off-by: Minchan Kim Reviewed-by: Mel Gorman Cc: Marek Szyprowski Acked-by: Michal Nazarewicz Cc: Rik van Riel Tested-by: Kyungmin Park Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 21 +++++++++++---------- mm/internal.h | 3 ++- mm/page_alloc.c | 2 ++ mm/vmscan.c | 43 +++++++++++++++++++++++++++++++++++++------ 4 files changed, 52 insertions(+), 17 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b2cce644ffc7..bfe1f4780644 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -71,6 +71,17 @@ struct anon_vma_chain { #endif }; +enum ttu_flags { + TTU_UNMAP = 0, /* unmap mode */ + TTU_MIGRATION = 1, /* migration mode */ + TTU_MUNLOCK = 2, /* munlock mode */ + TTU_ACTION_MASK = 0xff, + + TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ + TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ + TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ +}; + #ifdef CONFIG_MMU static inline void get_anon_vma(struct anon_vma *anon_vma) { @@ -164,16 +175,6 @@ int page_referenced(struct page *, int is_locked, int page_referenced_one(struct page *, struct vm_area_struct *, unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); -enum ttu_flags { - TTU_UNMAP = 0, /* unmap mode */ - TTU_MIGRATION = 1, /* migration mode */ - TTU_MUNLOCK = 2, /* munlock mode */ - TTU_ACTION_MASK = 0xff, - - TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ - TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ - TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ -}; #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) int try_to_unmap(struct page *, enum ttu_flags flags); diff --git a/mm/internal.h b/mm/internal.h index bbd7b34db4ea..8312d4fadf59 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long, unsigned long, unsigned long); extern void set_pageblock_order(void); - +unsigned long reclaim_clean_pages_from_list(struct zone *zone, + struct list_head *page_list); #endif /* __MM_INTERNAL_H */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cfd565dbe124..cefd14e6dcf2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5700,6 +5700,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) break; } + reclaim_clean_pages_from_list(cc.zone, &cc.migratepages); + ret = migrate_pages(&cc.migratepages, __alloc_contig_migrate_alloc, 0, false, MIGRATE_SYNC); diff --git a/mm/vmscan.c b/mm/vmscan.c index d16bf5a53266..1ee4b69a28a5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page, static unsigned long shrink_page_list(struct list_head *page_list, struct zone *zone, struct scan_control *sc, + enum ttu_flags ttu_flags, unsigned long *ret_nr_dirty, - unsigned long *ret_nr_writeback) + unsigned long *ret_nr_writeback, + bool force_reclaim) { LIST_HEAD(ret_pages); LIST_HEAD(free_pages); @@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list, mem_cgroup_uncharge_start(); while 
(!list_empty(page_list)) { - enum page_references references; struct address_space *mapping; struct page *page; int may_enter_fs; + enum page_references references = PAGEREF_RECLAIM_CLEAN; cond_resched(); @@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list, wait_on_page_writeback(page); } - references = page_check_references(page, sc); + if (!force_reclaim) + references = page_check_references(page, sc); + switch (references) { case PAGEREF_ACTIVATE: goto activate_locked; @@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - switch (try_to_unmap(page, TTU_UNMAP)) { + switch (try_to_unmap(page, ttu_flags)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: @@ -960,6 +964,33 @@ keep: return nr_reclaimed; } +unsigned long reclaim_clean_pages_from_list(struct zone *zone, + struct list_head *page_list) +{ + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .priority = DEF_PRIORITY, + .may_unmap = 1, + }; + unsigned long ret, dummy1, dummy2; + struct page *page, *next; + LIST_HEAD(clean_pages); + + list_for_each_entry_safe(page, next, page_list, lru) { + if (page_is_file_cache(page) && !PageDirty(page)) { + ClearPageActive(page); + list_move(&page->lru, &clean_pages); + } + } + + ret = shrink_page_list(&clean_pages, zone, &sc, + TTU_UNMAP|TTU_IGNORE_ACCESS, + &dummy1, &dummy2, true); + list_splice(&clean_pages, page_list); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); + return ret; +} + /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being @@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, if (nr_taken == 0) return 0; - nr_reclaimed = shrink_page_list(&page_list, zone, sc, - &nr_dirty, &nr_writeback); + nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, + &nr_dirty, &nr_writeback, false); spin_lock_irq(&zone->lru_lock); -- cgit v1.2.3 From 770c8aaaf6f04a87e6765f24d497132de9152a46 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 8 Oct 2012 16:31:57 -0700 Subject: mm: fix tracing in free_pcppages_bulk() page->private gets re-used in __free_one_page() to store page order (so trace_mm_page_pcpu_drain() may print order instead of migratetype) thus migratetype value must be cached locally. Fixes regression introduced in commit a7016235a61d ("mm: fix migratetype bug which slowed swapping"). This caused incorrect data to be attached to the mm_page_pcpu_drain trace event. 
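As an illustration (not part of the patch), the bug is a general pattern: a callee repurposes a field that the caller still reads afterwards. A minimal C sketch; tpage and free_one are invented stand-ins for struct page and __free_one_page():

#include <stdio.h>

struct tpage { unsigned long private; };	/* one overloaded word, like page->private */

/* Stand-in for __free_one_page(): repurposes ->private to hold the order. */
static void free_one(struct tpage *p, unsigned long order)
{
	p->private = order;
}

int main(void)
{
	struct tpage page = { 2 };	/* pretend 2 == MIGRATE_MOVABLE */
	unsigned long mt;

	/* Buggy order of operations: the trace reads ->private too late. */
	free_one(&page, 9);
	printf("buggy trace sees migratetype %lu\n", page.private);	/* 9: the order */

	/* Fixed, as in the patch: cache the migratetype in a local first. */
	page.private = 2;
	mt = page.private;
	free_one(&page, 9);
	printf("fixed trace sees migratetype %lu\n", mt);		/* 2 */
	return 0;
}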
[akpm@linux-foundation.org: add comment] Cc: Marek Szyprowski Cc: Michal Nazarewicz Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Hugh Dickins Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Acked-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cefd14e6dcf2..3f18a14effb8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -668,12 +668,15 @@ static void free_pcppages_bulk(struct zone *zone, int count, batch_free = to_free; do { + int mt; /* migratetype of the to-be-freed page */ + page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); + mt = page_private(page); /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ - __free_one_page(page, zone, 0, page_private(page)); - trace_mm_page_pcpu_drain(page, 0, page_private(page)); + __free_one_page(page, zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); } while (--to_free && --batch_free && !list_empty(list)); } __mod_zone_page_state(zone, NR_FREE_PAGES, count); -- cgit v1.2.3 From 2139cbe627b8910ded55148f87ee10f7485408ed Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 8 Oct 2012 16:32:00 -0700 Subject: cma: fix counting of isolated pages Isolated free pages shouldn't be accounted to NR_FREE_PAGES counter. Fix it by properly decreasing/increasing NR_FREE_PAGES counter in set_migratetype_isolate()/unset_migratetype_isolate() and removing counter adjustment for isolated pages from free_one_page() and split_free_page(). Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Mel Gorman Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 9 +++++++-- mm/page_isolation.c | 12 +++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3f18a14effb8..d259cc2b69c1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -691,7 +691,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + if (unlikely(migratetype != MIGRATE_ISOLATE)) + __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); spin_unlock(&zone->lock); } @@ -1392,6 +1393,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) unsigned int order; unsigned long watermark; struct zone *zone; + int mt; BUG_ON(!PageBuddy(page)); @@ -1407,7 +1409,10 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) list_del(&page->lru); zone->free_area[order].nr_free--; rmv_page_order(page); - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); + + mt = get_pageblock_migratetype(page); + if (unlikely(mt != MIGRATE_ISOLATE)) + __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); if (alloc_order != order) expand(zone, page, alloc_order, order, diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 247d1f175739..3ca1716471bc 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -76,8 +76,12 @@ int set_migratetype_isolate(struct page *page) out: if (!ret) { + unsigned long nr_pages; + set_pageblock_isolate(page); - move_freepages_block(zone, page, MIGRATE_ISOLATE); + 
nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); + + __mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages); } spin_unlock_irqrestore(&zone->lock, flags); @@ -89,12 +93,14 @@ out: void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; - unsigned long flags; + unsigned long flags, nr_pages; + zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) goto out; - move_freepages_block(zone, page, migratetype); + nr_pages = move_freepages_block(zone, page, migratetype); + __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); restore_pageblock_isolate(page, migratetype); out: spin_unlock_irqrestore(&zone->lock, flags); -- cgit v1.2.3 From d1ce749a0db12202b711d1aba1d29e823034648d Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 8 Oct 2012 16:32:02 -0700 Subject: cma: count free CMA pages Add NR_FREE_CMA_PAGES counter to be later used for checking watermark in __zone_watermark_ok(). For simplicity and to avoid #ifdef hell make this counter always available (not only when CONFIG_CMA=y). [akpm@linux-foundation.org: use conventional migratetype naming] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Mel Gorman Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 + include/linux/vmstat.h | 8 ++++++++ mm/page_alloc.c | 26 +++++++++++++++++++------- mm/page_isolation.c | 5 +++-- mm/vmstat.c | 1 + 5 files changed, 32 insertions(+), 9 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2daa54f55db7..85ac67aa5770 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -142,6 +142,7 @@ enum zone_stat_item { NUMA_OTHER, /* allocation from other node */ #endif NR_ANON_TRANSPARENT_HUGEPAGES, + NR_FREE_CMA_PAGES, NR_VM_ZONE_STAT_ITEMS }; /* diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index ad2cfd53dadc..a5bb15018b5c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -253,6 +253,14 @@ static inline void refresh_zone_stat_thresholds(void) { } #endif /* CONFIG_SMP */ +static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, + int migratetype) +{ + __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); + if (is_migrate_cma(migratetype)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); +} + extern const char * const vmstat_text[]; #endif /* _LINUX_VMSTAT_H */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d259cc2b69c1..6969a8abdba2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -558,7 +558,8 @@ static inline void __free_one_page(struct page *page, if (page_is_guard(buddy)) { clear_page_guard_flag(buddy); set_page_private(page, 0); - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + __mod_zone_freepage_state(zone, 1 << order, + migratetype); } else { list_del(&buddy->lru); zone->free_area[order].nr_free--; @@ -677,6 +678,8 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); } while (--to_free && --batch_free && !list_empty(list)); } __mod_zone_page_state(zone, NR_FREE_PAGES, count); @@ -692,7 +695,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order, 
__free_one_page(page, zone, order, migratetype); if (unlikely(migratetype != MIGRATE_ISOLATE)) - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + __mod_zone_freepage_state(zone, 1 << order, migratetype); spin_unlock(&zone->lock); } @@ -815,7 +818,8 @@ static inline void expand(struct zone *zone, struct page *page, set_page_guard_flag(&page[size]); set_page_private(&page[size], high); /* Guard pages are not available for any usage */ - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); + __mod_zone_freepage_state(zone, -(1 << high), + migratetype); continue; } #endif @@ -1141,6 +1145,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, } set_page_private(page, mt); list = &page->lru; + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, + -(1 << order)); } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); @@ -1412,7 +1419,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) mt = get_pageblock_migratetype(page); if (unlikely(mt != MIGRATE_ISOLATE)) - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); + __mod_zone_freepage_state(zone, -(1UL << order), mt); if (alloc_order != order) expand(zone, page, alloc_order, order, @@ -1516,7 +1523,8 @@ again: spin_unlock(&zone->lock); if (!page) goto failed; - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); + __mod_zone_freepage_state(zone, -(1 << order), + get_pageblock_migratetype(page)); } __count_zone_vm_events(PGALLOC, zone, 1 << order); @@ -2890,7 +2898,8 @@ void show_free_areas(unsigned int filter) " unevictable:%lu" " dirty:%lu writeback:%lu unstable:%lu\n" " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" - " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", + " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" + " free_cma:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_ISOLATED_ANON), @@ -2907,7 +2916,8 @@ void show_free_areas(unsigned int filter) global_page_state(NR_FILE_MAPPED), global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), - global_page_state(NR_BOUNCE)); + global_page_state(NR_BOUNCE), + global_page_state(NR_FREE_CMA_PAGES)); for_each_populated_zone(zone) { int i; @@ -2939,6 +2949,7 @@ void show_free_areas(unsigned int filter) " pagetables:%lukB" " unstable:%lukB" " bounce:%lukB" + " free_cma:%lukB" " writeback_tmp:%lukB" " pages_scanned:%lu" " all_unreclaimable? %s" @@ -2968,6 +2979,7 @@ void show_free_areas(unsigned int filter) K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_UNSTABLE_NFS)), K(zone_page_state(zone, NR_BOUNCE)), + K(zone_page_state(zone, NR_FREE_CMA_PAGES)), K(zone_page_state(zone, NR_WRITEBACK_TEMP)), zone->pages_scanned, (zone->all_unreclaimable ? 
"yes" : "no") diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 3ca1716471bc..345643b85bd4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -77,11 +77,12 @@ int set_migratetype_isolate(struct page *page) out: if (!ret) { unsigned long nr_pages; + int migratetype = get_pageblock_migratetype(page); set_pageblock_isolate(page); nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); - __mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages); + __mod_zone_freepage_state(zone, -nr_pages, migratetype); } spin_unlock_irqrestore(&zone->lock, flags); @@ -100,7 +101,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) goto out; nr_pages = move_freepages_block(zone, page, migratetype); - __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); + __mod_zone_freepage_state(zone, nr_pages, migratetype); restore_pageblock_isolate(page, migratetype); out: spin_unlock_irqrestore(&zone->lock, flags); diff --git a/mm/vmstat.c b/mm/vmstat.c index b3e3b9d525d0..acbd85c983e6 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -722,6 +722,7 @@ const char * const vmstat_text[] = { "numa_other", #endif "nr_anon_transparent_hugepages", + "nr_free_cma", "nr_dirty_threshold", "nr_dirty_background_threshold", -- cgit v1.2.3 From d95ea5d18e699515468368415c93ed49b1a3221b Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 8 Oct 2012 16:32:05 -0700 Subject: cma: fix watermark checking * Add ALLOC_CMA alloc flag and pass it to [__]zone_watermark_ok() (from Minchan Kim). * During watermark check decrease available free pages number by free CMA pages number if necessary (unmovable allocations cannot use pages from CMA areas). Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Mel Gorman Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 8 +++++++- mm/internal.h | 14 ++++++++++++++ mm/page_alloc.c | 31 +++++++++++++++---------------- 3 files changed, 36 insertions(+), 17 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/compaction.c b/mm/compaction.c index 0fbc6b73a522..1f61bcbd6262 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -934,6 +934,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; + int alloc_flags = 0; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) @@ -941,6 +942,10 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, count_vm_event(COMPACTSTALL); +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { @@ -951,7 +956,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ - if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) + if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, + alloc_flags)) break; } diff --git a/mm/internal.h b/mm/internal.h index 8312d4fadf59..96cda4c6ac56 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -358,4 +358,18 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long, extern void set_pageblock_order(void); unsigned long reclaim_clean_pages_from_list(struct zone *zone, struct list_head *page_list); 
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */ +#define ALLOC_WMARK_MIN WMARK_MIN +#define ALLOC_WMARK_LOW WMARK_LOW +#define ALLOC_WMARK_HIGH WMARK_HIGH +#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ + +/* Mask to get the watermark bits */ +#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) + +#define ALLOC_HARDER 0x10 /* try to alloc harder */ +#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ +#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ +#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ + #endif /* __MM_INTERNAL_H */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6969a8abdba2..f2c7cc6a3039 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1541,19 +1541,6 @@ failed: return NULL; } -/* The ALLOC_WMARK bits are used as an index to zone->watermark */ -#define ALLOC_WMARK_MIN WMARK_MIN -#define ALLOC_WMARK_LOW WMARK_LOW -#define ALLOC_WMARK_HIGH WMARK_HIGH -#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ - -/* Mask to get the watermark bits */ -#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) - -#define ALLOC_HARDER 0x10 /* try to alloc harder */ -#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ -#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ - #ifdef CONFIG_FAIL_PAGE_ALLOC static struct { @@ -1648,7 +1635,11 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, min -= min / 2; if (alloc_flags & ALLOC_HARDER) min -= min / 4; - +#ifdef CONFIG_CMA + /* If allocation can't use CMA areas don't use free CMA pages */ + if (!(alloc_flags & ALLOC_CMA)) + free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); +#endif if (free_pages <= min + lowmem_reserve) return false; for (o = 0; o < order; o++) { @@ -2362,7 +2353,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask) unlikely(test_thread_flag(TIF_MEMDIE)))) alloc_flags |= ALLOC_NO_WATERMARKS; } - +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif return alloc_flags; } @@ -2587,6 +2581,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; int migratetype = allocflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; + int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET; gfp_mask &= gfp_allowed_mask; @@ -2615,9 +2610,13 @@ retry_cpuset: if (!preferred_zone) goto out; +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif /* First allocation attempt */ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, - zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, + zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); if (unlikely(!page)) page = __alloc_pages_slowpath(gfp_mask, order, -- cgit v1.2.3 From b12c4ad14ee0232ad47c2bef404b6d42a3578332 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:32:08 -0700 Subject: mm: page_alloc: use get_freepage_migratetype() instead of page_private() The page allocator uses set_page_private and page_private for handling migratetype when it frees page. Let's replace them with [set|get] _freepage_migratetype to make it more clear. 
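As an editorial aside on the preceding patch ("cma: fix watermark checking"), the watermark adjustment reduces to a single subtraction: if a request may not be placed in CMA pageblocks, the free CMA pages are not really available to it. A minimal C sketch; watermark_ok is an invented stand-in for __zone_watermark_ok():

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA 0x80	/* same bit value the patch defines in mm/internal.h */

static bool watermark_ok(unsigned long free_pages, unsigned long free_cma,
			 unsigned long min, int alloc_flags)
{
	/* Unmovable allocations cannot be placed in CMA areas, so free
	 * CMA pages must not count toward their headroom. */
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= free_cma;

	return free_pages > min;
}

int main(void)
{
	/* 1000 pages free, 800 of them in CMA areas, watermark of 500 */
	printf("movable allocation ok:   %d\n",
	       watermark_ok(1000, 800, 500, ALLOC_CMA));	/* 1 */
	printf("unmovable allocation ok: %d\n",
	       watermark_ok(1000, 800, 500, 0));		/* 0 */
	return 0;
}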
Signed-off-by: Minchan Kim Acked-by: KAMEZAWA Hiroyuki Reviewed-by: Yasuaki Ishimatsu Acked-by: Mel Gorman Cc: Xishi Qiu Cc: Wen Congyang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 12 ++++++++++++ mm/page_alloc.c | 6 +++--- mm/page_isolation.c | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d5f823ce3fc..4ed5c7367b9b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -237,6 +237,18 @@ struct inode; #define page_private(page) ((page)->private) #define set_page_private(page, v) ((page)->private = (v)) +/* It's valid only if the page is free path or free_list */ +static inline void set_freepage_migratetype(struct page *page, int migratetype) +{ + set_page_private(page, migratetype); +} + +/* It's valid only if the page is free path or free_list */ +static inline int get_freepage_migratetype(struct page *page) +{ + return page_private(page); +} + /* * FIXME: take this include out, include page-flags.h in * files which need it (119 of them) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f2c7cc6a3039..6aa0a8e89c5d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -674,7 +674,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - mt = page_private(page); + mt = get_freepage_migratetype(page); /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); @@ -1143,7 +1143,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE) mt = migratetype; } - set_page_private(page, mt); + set_freepage_migratetype(page, mt); list = &page->lru; if (is_migrate_cma(mt)) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, @@ -1313,7 +1313,7 @@ void free_hot_cold_page(struct page *page, int cold) return; migratetype = get_pageblock_migratetype(page); - set_page_private(page, migratetype); + set_freepage_migratetype(page, migratetype); local_irq_save(flags); if (unlikely(wasMlocked)) free_page_mlock(page); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 345643b85bd4..9c03dca8c2ad 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -203,7 +203,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) if (PageBuddy(page)) pfn += 1 << page_order(page); else if (page_count(page) == 0 && - page_private(page) == MIGRATE_ISOLATE) + get_freepage_migratetype(page) == MIGRATE_ISOLATE) pfn += 1; else break; -- cgit v1.2.3 From 95e3441248053fc06bbb1dbbd34409a84211619e Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:32:11 -0700 Subject: mm: remain migratetype in freed page The page allocator caches the pageblock information in page->private while the page is in the PCP freelists, but this is overwritten with the order of the page when it is freed to the buddy allocator. This patch stores the migratetype of the page in the page->index field instead, so that it is available at all times while the page remains on a free list. This patch adds a new call site in __free_pages_ok(), which adds a small overhead, but only on the high-order allocation path, so the cost should be negligible.
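As an illustration (not part of the patches), the two changes compose neatly: because every caller already goes through the accessors, retargeting them from page->private to page->index touches no call site, and the migratetype now survives the buddy allocator overwriting ->private with the page order. A minimal C sketch with tpage as an invented stand-in for struct page:

#include <stdio.h>

struct tpage {
	unsigned long private;	/* the buddy allocator stores the order here */
	unsigned long index;	/* free to carry the migratetype on a free list */
};

/* Valid only while the page sits on a free list, as in the kernel. */
static inline void set_freepage_migratetype(struct tpage *p, int mt)
{
	p->index = mt;		/* was: set_page_private(p, mt) */
}

static inline int get_freepage_migratetype(struct tpage *p)
{
	return p->index;	/* was: page_private(p) */
}

int main(void)
{
	struct tpage page = { 0, 0 };

	set_freepage_migratetype(&page, 2);
	page.private = 9;	/* buddy overwrites ->private with the order */
	printf("migratetype survives: %d\n", get_freepage_migratetype(&page));
	return 0;
}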
Signed-off-by: Minchan Kim Acked-by: KAMEZAWA Hiroyuki Reviewed-by: Yasuaki Ishimatsu Acked-by: Mel Gorman Cc: Xishi Qiu Cc: Wen Congyang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- mm/page_alloc.c | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index 4ed5c7367b9b..b01e585ab4b5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -240,13 +240,13 @@ struct inode; /* It's valid only if the page is free path or free_list */ static inline void set_freepage_migratetype(struct page *page, int migratetype) { - set_page_private(page, migratetype); + page->index = migratetype; } /* It's valid only if the page is free path or free_list */ static inline int get_freepage_migratetype(struct page *page) { - return page_private(page); + return page->index; } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6aa0a8e89c5d..94fd283dde98 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -729,6 +729,7 @@ static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; int wasMlocked = __TestClearPageMlocked(page); + int migratetype; if (!free_pages_prepare(page, order)) return; @@ -737,8 +738,9 @@ static void __free_pages_ok(struct page *page, unsigned int order) if (unlikely(wasMlocked)) free_page_mlock(page); __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, order, - get_pageblock_migratetype(page)); + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); + free_one_page(page_zone(page), page, order, migratetype); local_irq_restore(flags); } @@ -959,6 +961,7 @@ static int move_freepages(struct zone *zone, order = page_order(page); list_move(&page->lru, &zone->free_area[order].free_list[migratetype]); + set_freepage_migratetype(page, migratetype); page += 1 << order; pages_moved += 1 << order; } -- cgit v1.2.3 From 435b405c06119d93333738172b8060b0ed12af41 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:32:16 -0700 Subject: memory-hotplug: fix pages missed by race rather than failing If a race between allocation and isolation happens during memory-hotplug offline, some pages can end up on the MIGRATE_MOVABLE free_list even though the pageblock's migratetype is MIGRATE_ISOLATE. The race can be detected by get_freepage_migratetype() in __test_page_isolated_in_pageblock(). Currently, when it is detected, EBUSY gets bubbled all the way up and the hotplug operation fails. A better idea is, instead of returning and failing memory hot-remove, to move the free page to the correct list at the time the race is detected. This improves the success ratio of the memory hot-remove operation, even though the race is really rare. Suggested by Mel Gorman.
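As an illustration (not part of the patch), the fix follows a repair-instead-of-fail pattern: when the scan finds a free page on the wrong list, move it to the right one and keep going rather than aborting with EBUSY. A minimal C sketch; fpage and fixup_isolated_block are invented names:

#include <stdio.h>

enum mt { MOVABLE, ISOLATE };

struct fpage {
	enum mt list_mt;	/* the free list this page actually sits on */
};

/* Every free page in a block being isolated should be on the ISOLATE
 * list; re-tag stragglers left behind by the race instead of failing. */
static int fixup_isolated_block(struct fpage *pages, int n, enum mt block_mt)
{
	int i;

	for (i = 0; i < n; i++) {
		if (pages[i].list_mt != block_mt) {
			/* old behaviour: return -EBUSY; new: repair in place */
			pages[i].list_mt = block_mt;
		}
	}
	return 0;	/* isolation can now always make progress */
}

int main(void)
{
	struct fpage pages[3] = { {ISOLATE}, {MOVABLE}, {ISOLATE} };

	printf("fixup returned %d\n", fixup_isolated_block(pages, 3, ISOLATE));
	return 0;
}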
[akpm@linux-foundation.org: small cleanup] Signed-off-by: Minchan Kim Cc: KAMEZAWA Hiroyuki Reviewed-by: Yasuaki Ishimatsu Acked-by: Mel Gorman Cc: Xishi Qiu Cc: Wen Congyang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-isolation.h | 4 ++++ mm/page_alloc.c | 2 +- mm/page_isolation.c | 16 ++++++++++++++-- 3 files changed, 19 insertions(+), 3 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 105077aa7685..fca8c0a5c188 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -6,6 +6,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count); void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, int migratetype); +int move_freepages(struct zone *zone, + struct page *start_page, struct page *end_page, + int migratetype); + /* * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. * If specified range includes migrate types other than MOVABLE or CMA, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 94fd283dde98..82f0b2f54f81 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -925,7 +925,7 @@ static int fallbacks[MIGRATE_TYPES][4] = { * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ -static int move_freepages(struct zone *zone, +int move_freepages(struct zone *zone, struct page *start_page, struct page *end_page, int migratetype) { diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 6744235d2d0e..5f34a9053ce0 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -201,8 +201,20 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) } page = pfn_to_page(pfn); if (PageBuddy(page)) { - if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) - break; + /* + * If race between isolatation and allocation happens, + * some free pages could be in MIGRATE_MOVABLE list + * although pageblock's migratation type of the page + * is MIGRATE_ISOLATE. Catch it and move the page into + * MIGRATE_ISOLATE list. + */ + if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) { + struct page *end_page; + + end_page = page + (1 << page_order(page)) - 1; + move_freepages(page_zone(page), page, end_page, + MIGRATE_ISOLATE); + } pfn += 1 << page_order(page); } else if (page_count(page) == 0 && -- cgit v1.2.3 From f2d52fe51c8c0a18cf5fbe583bad51090d12c146 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 8 Oct 2012 16:32:24 -0700 Subject: mm/memblock: cleanup early_node_map[] related comments Commit 0ee332c14518 ("memblock: Kill early_node_map[]") removed early_node_map[]. Clean up the comments to comply with that change. 
Signed-off-by: Wanpeng Li Cc: Michal Hocko Cc: KAMEZAWA Hiroyuki Cc: Minchan Kim Cc: Gavin Shan Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 3 +-- mm/nobootmem.c | 2 -- mm/page_alloc.c | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 19dc455b4f3d..569d67d4243e 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -70,8 +70,7 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, * @p_end: ptr to ulong for end pfn of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL * - * Walks over configured memory ranges. Available after early_node_map is - * populated. + * Walks over configured memory ranges. */ #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 405573010f99..bd82f6b31411 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -162,8 +162,6 @@ unsigned long __init free_all_bootmem(void) * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id * because in some case like Node0 doesn't have RAM installed * low ram will be on Node1 - * Use MAX_NUMNODES will make sure all ranges in early_node_map[] - * will be used instead of only Node0 related */ return free_low_memory_core_early(MAX_NUMNODES); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 82f0b2f54f81..ca002b39b9b4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4931,7 +4931,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) zone_movable_pfn[i] << PAGE_SHIFT); } - /* Print out the early_node_map[] */ + /* Print out the early node map */ printk("Early memory node ranges\n"); for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) printk(" node %3d: [mem %#010lx-%#010lx]\n", nid, -- cgit v1.2.3 From 753341a4b85ff337487b9959c71c529f522004f4 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 8 Oct 2012 16:32:40 -0700 Subject: revert "mm: have order > 0 compaction start off where it left" This reverts commit 7db8889ab05b ("mm: have order > 0 compaction start off where it left") and commit de74f1cc ("mm: have order > 0 compaction start near a pageblock with free pages"). These patches were a good idea and tests confirmed that they massively reduced the amount of scanning but the implementation is complex and tricky to understand. A later patch will cache what pageblocks should be skipped and reimplements the concept of compact_cached_free_pfn on top for both migration and free scanners. 
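As an illustration (not part of the patch), after the revert the free scanner's starting point is recomputed on every run: the last pfn of the zone rounded down to a pageblock boundary, as in the compact_zone() hunk below. The arithmetic in isolation, assuming 512 pages per pageblock (4K pages, 2MB pageblocks) and arbitrary pfn values:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL

int main(void)
{
	unsigned long zone_start_pfn = 0x100;
	unsigned long spanned_pages = 0x3f000;
	unsigned long free_pfn;

	/* cc->free_pfn = cc->migrate_pfn + zone->spanned_pages, rounded
	 * down to a pageblock boundary, as in the patch. */
	free_pfn = zone_start_pfn + spanned_pages;
	free_pfn &= ~(PAGEBLOCK_NR_PAGES - 1);

	printf("free scanner starts at pfn %#lx\n", free_pfn);	/* 0x3f000 */
	return 0;
}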
Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: Richard Davies Cc: Shaohua Li Cc: Avi Kivity Acked-by: Rafael Aquini Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 4 ---- mm/compaction.c | 65 ++++---------------------------------------------- mm/internal.h | 6 ----- mm/page_alloc.c | 5 ---- 4 files changed, 5 insertions(+), 75 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 85ac67aa5770..16a4cc2950a0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -369,10 +369,6 @@ struct zone { */ spinlock_t lock; int all_unreclaimable; /* All pages pinned */ -#if defined CONFIG_COMPACTION || defined CONFIG_CMA - /* pfn where the last incremental compaction isolated free pages */ - unsigned long compact_cached_free_pfn; -#endif #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; diff --git a/mm/compaction.c b/mm/compaction.c index bdf6e13045ea..db76361a3117 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -537,20 +537,6 @@ next_pageblock: #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION -/* - * Returns the start pfn of the last page block in a zone. This is the starting - * point for full compaction of a zone. Compaction searches for free pages from - * the end of each zone, while isolate_freepages_block scans forward inside each - * page block. - */ -static unsigned long start_free_pfn(struct zone *zone) -{ - unsigned long free_pfn; - free_pfn = zone->zone_start_pfn + zone->spanned_pages; - free_pfn &= ~(pageblock_nr_pages-1); - return free_pfn; -} - /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. @@ -619,19 +605,8 @@ static void isolate_freepages(struct zone *zone, * looking for free pages, the search will restart here as * page migration may have returned some pages to the allocator */ - if (isolated) { + if (isolated) high_pfn = max(high_pfn, pfn); - - /* - * If the free scanner has wrapped, update - * compact_cached_free_pfn to point to the highest - * pageblock with free pages. This reduces excessive - * scanning of full pageblocks near the end of the - * zone - */ - if (cc->order > 0 && cc->wrapped) - zone->compact_cached_free_pfn = high_pfn; - } } /* split_free_page does not map the pages */ @@ -639,11 +614,6 @@ static void isolate_freepages(struct zone *zone, cc->free_pfn = high_pfn; cc->nr_freepages = nr_freepages; - - /* If compact_cached_free_pfn is reset then set it now */ - if (cc->order > 0 && !cc->wrapped && - zone->compact_cached_free_pfn == start_free_pfn(zone)) - zone->compact_cached_free_pfn = high_pfn; } /* @@ -738,26 +708,8 @@ static int compact_finished(struct zone *zone, if (fatal_signal_pending(current)) return COMPACT_PARTIAL; - /* - * A full (order == -1) compaction run starts at the beginning and - * end of a zone; it completes when the migrate and free scanner meet. - * A partial (order > 0) compaction can start with the free scanner - * at a random point in the zone, and may have to restart. - */ - if (cc->free_pfn <= cc->migrate_pfn) { - if (cc->order > 0 && !cc->wrapped) { - /* We started partway through; restart at the end. 
*/ - unsigned long free_pfn = start_free_pfn(zone); - zone->compact_cached_free_pfn = free_pfn; - cc->free_pfn = free_pfn; - cc->wrapped = 1; - return COMPACT_CONTINUE; - } - return COMPACT_COMPLETE; - } - - /* We wrapped around and ended up where we started. */ - if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn) + /* Compaction run completes if the migrate and free scanner meet */ + if (cc->free_pfn <= cc->migrate_pfn) return COMPACT_COMPLETE; /* @@ -863,15 +815,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) /* Setup to move all movable pages to the end of the zone */ cc->migrate_pfn = zone->zone_start_pfn; - - if (cc->order > 0) { - /* Incremental compaction. Start where the last one stopped. */ - cc->free_pfn = zone->compact_cached_free_pfn; - cc->start_free_pfn = cc->free_pfn; - } else { - /* Order == -1 starts at the end of the zone. */ - cc->free_pfn = start_free_pfn(zone); - } + cc->free_pfn = cc->migrate_pfn + zone->spanned_pages; + cc->free_pfn &= ~(pageblock_nr_pages-1); migrate_prep_local(); diff --git a/mm/internal.h b/mm/internal.h index 97664be2ca37..6f6bb9ab9386 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -118,14 +118,8 @@ struct compact_control { unsigned long nr_freepages; /* Number of isolated free pages */ unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ - unsigned long start_free_pfn; /* where we started the search */ unsigned long migrate_pfn; /* isolate_migratepages search base */ bool sync; /* Synchronous migration */ - bool wrapped; /* Order > 0 compactions are - incremental, once free_pfn - and migrate_pfn meet, we restart - from the top of the zone; - remember we wrapped around. */ int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ca002b39b9b4..628968c1ccf4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4490,11 +4490,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone->spanned_pages = size; zone->present_pages = realsize; -#if defined CONFIG_COMPACTION || defined CONFIG_CMA - zone->compact_cached_free_pfn = zone->zone_start_pfn + - zone->spanned_pages; - zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1); -#endif #ifdef CONFIG_NUMA zone->node = nid; zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) -- cgit v1.2.3 From bb13ffeb9f6bfeb301443994dfbf29f91117dfb3 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 8 Oct 2012 16:32:41 -0700 Subject: mm: compaction: cache if a pageblock was scanned and no pages were isolated When compaction was implemented it was known that scanning could potentially be excessive. The ideal was that a counter be maintained for each pageblock but maintaining this information would incur a severe penalty due to a shared writable cache line. It has reached the point where the scanning costs are a serious problem, particularly on long-lived systems where a large process starts and allocates a large number of THPs at the same time. Instead of using a shared counter, this patch adds another bit to the pageblock flags called PG_migrate_skip. If a pageblock is scanned by either migrate or free scanner and 0 pages were isolated, the pageblock is marked to be skipped in the future. When scanning, this bit is checked before any scanning takes place and the block skipped if set. The main difficulty with a patch like this is "when to ignore the cached information?" 
If it's ignored too often, the scanning rates will still be excessive. If the information is too stale then allocations will fail that might have otherwise succeeded. In this patch o CMA always ignores the information o If the migrate and free scanner meet then the cached information will be discarded if it's at least 5 seconds since the last time the cache was discarded o If there are a large number of allocation failures, discard the cache. The time-based heuristic is very clumsy but there are few choices for a better event. Depending solely on multiple allocation failures still allows excessive scanning when THP allocations are failing in quick succession due to memory pressure. Waiting until memory pressure is relieved would cause compaction to continually fail instead of using reclaim/compaction to try allocate the page. The time-based mechanism is clumsy but a better option is not obvious. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: Richard Davies Cc: Shaohua Li Cc: Avi Kivity Acked-by: Rafael Aquini Cc: Fengguang Wu Cc: Michal Nazarewicz Cc: Bartlomiej Zolnierkiewicz Cc: Kyungmin Park Cc: Mark Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 + include/linux/pageblock-flags.h | 19 +++++- mm/compaction.c | 125 ++++++++++++++++++++++++++++++++++------ mm/internal.h | 4 +- mm/page_alloc.c | 38 ++++++------ 5 files changed, 151 insertions(+), 38 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 16a4cc2950a0..f85ecc9cfa1b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -369,6 +369,9 @@ struct zone { */ spinlock_t lock; int all_unreclaimable; /* All pages pinned */ +#if defined CONFIG_COMPACTION || defined CONFIG_CMA + unsigned long compact_blockskip_expire; +#endif #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 19ef95d293ae..eed27f4f4c3e 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -30,6 +30,9 @@ enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + 3 - 1, /* 3 bits required for migrate types */ +#ifdef CONFIG_COMPACTION + PB_migrate_skip,/* If set the block is skipped by compaction */ +#endif /* CONFIG_COMPACTION */ NR_PAGEBLOCK_BITS }; @@ -65,10 +68,22 @@ unsigned long get_pageblock_flags_group(struct page *page, void set_pageblock_flags_group(struct page *page, unsigned long flags, int start_bitidx, int end_bitidx); +#ifdef CONFIG_COMPACTION +#define get_pageblock_skip(page) \ + get_pageblock_flags_group(page, PB_migrate_skip, \ + PB_migrate_skip + 1) +#define clear_pageblock_skip(page) \ + set_pageblock_flags_group(page, 0, PB_migrate_skip, \ + PB_migrate_skip + 1) +#define set_pageblock_skip(page) \ + set_pageblock_flags_group(page, 1, PB_migrate_skip, \ + PB_migrate_skip + 1) +#endif /* CONFIG_COMPACTION */ + #define get_pageblock_flags(page) \ - get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1) + get_pageblock_flags_group(page, 0, PB_migrate_end) #define set_pageblock_flags(page, flags) \ set_pageblock_flags_group(page, flags, \ - 0, NR_PAGEBLOCK_BITS-1) + 0, PB_migrate_end) #endif /* PAGEBLOCK_FLAGS_H */ diff --git a/mm/compaction.c b/mm/compaction.c index db76361a3117..d9dbb97e607b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -50,6 +50,79 @@ static inline bool migrate_async_suitable(int migratetype) return is_migrate_cma(migratetype) || 
migratetype == MIGRATE_MOVABLE; } +#ifdef CONFIG_COMPACTION +/* Returns true if the pageblock should be scanned for pages to isolate. */ +static inline bool isolation_suitable(struct compact_control *cc, + struct page *page) +{ + if (cc->ignore_skip_hint) + return true; + + return !get_pageblock_skip(page); +} + +/* + * This function is called to clear all cached information on pageblocks that + * should be skipped for page isolation when the migrate and free page scanner + * meet. + */ +static void reset_isolation_suitable(struct zone *zone) +{ + unsigned long start_pfn = zone->zone_start_pfn; + unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages; + unsigned long pfn; + + /* + * Do not reset more than once every five seconds. If allocations are + * failing sufficiently quickly to allow this to happen then continually + * scanning for compaction is not going to help. The choice of five + * seconds is arbitrary but will mitigate excessive scanning. + */ + if (time_before(jiffies, zone->compact_blockskip_expire)) + return; + zone->compact_blockskip_expire = jiffies + (HZ * 5); + + /* Walk the zone and mark every pageblock as suitable for isolation */ + for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + struct page *page; + + cond_resched(); + + if (!pfn_valid(pfn)) + continue; + + page = pfn_to_page(pfn); + if (zone != page_zone(page)) + continue; + + clear_pageblock_skip(page); + } +} + +/* + * If no pages were isolated then mark this pageblock to be skipped in the + * future. The information is later cleared by reset_isolation_suitable(). + */ +static void update_pageblock_skip(struct page *page, unsigned long nr_isolated) +{ + if (!page) + return; + + if (!nr_isolated) + set_pageblock_skip(page); +} +#else +static inline bool isolation_suitable(struct compact_control *cc, + struct page *page) +{ + return true; +} + +static void update_pageblock_skip(struct page *page, unsigned long nr_isolated) +{ +} +#endif /* CONFIG_COMPACTION */ + static inline bool should_release_lock(spinlock_t *lock) { return need_resched() || spin_is_contended(lock); @@ -181,7 +254,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, bool strict) { int nr_scanned = 0, total_isolated = 0; - struct page *cursor; + struct page *cursor, *valid_page = NULL; unsigned long nr_strict_required = end_pfn - blockpfn; unsigned long flags; bool locked = false; @@ -196,6 +269,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, nr_scanned++; if (!pfn_valid_within(blockpfn)) continue; + if (!valid_page) + valid_page = page; if (!PageBuddy(page)) continue; @@ -250,6 +325,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (locked) spin_unlock_irqrestore(&cc->zone->lock, flags); + /* Update the pageblock-skip if the whole pageblock was scanned */ + if (blockpfn == end_pfn) + update_pageblock_skip(valid_page, total_isolated); + return total_isolated; } @@ -267,22 +346,14 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, * a free page). 
*/ unsigned long -isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) +isolate_freepages_range(struct compact_control *cc, + unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_end_pfn; - struct zone *zone = NULL; LIST_HEAD(freelist); - /* cc needed for isolate_freepages_block to acquire zone->lock */ - struct compact_control cc = { - .sync = true, - }; - - if (pfn_valid(start_pfn)) - cc.zone = zone = page_zone(pfn_to_page(start_pfn)); - for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) { - if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn))) + if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn))) break; /* @@ -292,7 +363,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); block_end_pfn = min(block_end_pfn, end_pfn); - isolated = isolate_freepages_block(&cc, pfn, block_end_pfn, + isolated = isolate_freepages_block(cc, pfn, block_end_pfn, &freelist, true); /* @@ -387,6 +458,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, struct lruvec *lruvec; unsigned long flags; bool locked = false; + struct page *page = NULL, *valid_page = NULL; /* * Ensure that there are not too many pages isolated from the LRU @@ -407,8 +479,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, /* Time to isolate some pages for migration */ cond_resched(); for (; low_pfn < end_pfn; low_pfn++) { - struct page *page; - /* give a chance to irqs before checking need_resched() */ if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) { if (should_release_lock(&zone->lru_lock)) { @@ -444,6 +514,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (page_zone(page) != zone) continue; + if (!valid_page) + valid_page = page; + + /* If isolation recently failed, do not retry */ + pageblock_nr = low_pfn >> pageblock_order; + if (!isolation_suitable(cc, page)) + goto next_pageblock; + /* Skip if free */ if (PageBuddy(page)) continue; @@ -453,7 +531,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, * migration is optimistic to see if the minimum amount of work * satisfies the allocation */ - pageblock_nr = low_pfn >> pageblock_order; if (!cc->sync && last_pageblock_nr != pageblock_nr && !migrate_async_suitable(get_pageblock_migratetype(page))) { goto next_pageblock; @@ -530,6 +607,10 @@ next_pageblock: if (locked) spin_unlock_irqrestore(&zone->lru_lock, flags); + /* Update the pageblock-skip if the whole pageblock was scanned */ + if (low_pfn == end_pfn) + update_pageblock_skip(valid_page, nr_isolated); + trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); return low_pfn; @@ -593,6 +674,10 @@ static void isolate_freepages(struct zone *zone, if (!suitable_migration_target(page)) continue; + /* If isolation recently failed, do not retry */ + if (!isolation_suitable(cc, page)) + continue; + /* Found a block suitable for isolating free pages from */ isolated = 0; end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); @@ -709,8 +794,10 @@ static int compact_finished(struct zone *zone, return COMPACT_PARTIAL; /* Compaction run completes if the migrate and free scanner meet */ - if (cc->free_pfn <= cc->migrate_pfn) + if (cc->free_pfn <= cc->migrate_pfn) { + reset_isolation_suitable(cc->zone); return COMPACT_COMPLETE; + } /* * order == -1 is expected when compacting via @@ -818,6 +905,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) 
cc->free_pfn = cc->migrate_pfn + zone->spanned_pages; cc->free_pfn &= ~(pageblock_nr_pages-1); + /* Clear pageblock skip if there are numerous alloc failures */ + if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT) + reset_isolation_suitable(zone); + migrate_prep_local(); while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { diff --git a/mm/internal.h b/mm/internal.h index 6f6bb9ab9386..7ba56ac360b2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -120,6 +120,7 @@ struct compact_control { unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ bool sync; /* Synchronous migration */ + bool ignore_skip_hint; /* Scan blocks even if marked skip */ int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ @@ -129,7 +130,8 @@ struct compact_control { }; unsigned long -isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn); +isolate_freepages_range(struct compact_control *cc, + unsigned long start_pfn, unsigned long end_pfn); unsigned long isolate_migratepages_range(struct zone *zone, struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 628968c1ccf4..44c56049edf9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5679,7 +5679,8 @@ __alloc_contig_migrate_alloc(struct page *page, unsigned long private, } /* [start, end) must belong to a single zone. */ -static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) +static int __alloc_contig_migrate_range(struct compact_control *cc, + unsigned long start, unsigned long end) { /* This function is based on compact_zone() from compaction.c. */ @@ -5687,25 +5688,17 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) unsigned int tries = 0; int ret = 0; - struct compact_control cc = { - .nr_migratepages = 0, - .order = -1, - .zone = page_zone(pfn_to_page(start)), - .sync = true, - }; - INIT_LIST_HEAD(&cc.migratepages); - migrate_prep_local(); - while (pfn < end || !list_empty(&cc.migratepages)) { + while (pfn < end || !list_empty(&cc->migratepages)) { if (fatal_signal_pending(current)) { ret = -EINTR; break; } - if (list_empty(&cc.migratepages)) { - cc.nr_migratepages = 0; - pfn = isolate_migratepages_range(cc.zone, &cc, + if (list_empty(&cc->migratepages)) { + cc->nr_migratepages = 0; + pfn = isolate_migratepages_range(cc->zone, cc, pfn, end); if (!pfn) { ret = -EINTR; @@ -5717,14 +5710,14 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) break; } - reclaim_clean_pages_from_list(cc.zone, &cc.migratepages); + reclaim_clean_pages_from_list(cc->zone, &cc->migratepages); - ret = migrate_pages(&cc.migratepages, + ret = migrate_pages(&cc->migratepages, __alloc_contig_migrate_alloc, 0, false, MIGRATE_SYNC); } - putback_lru_pages(&cc.migratepages); + putback_lru_pages(&cc->migratepages); return ret > 0 ? 0 : ret; } @@ -5803,6 +5796,15 @@ int alloc_contig_range(unsigned long start, unsigned long end, unsigned long outer_start, outer_end; int ret = 0, order; + struct compact_control cc = { + .nr_migratepages = 0, + .order = -1, + .zone = page_zone(pfn_to_page(start)), + .sync = true, + .ignore_skip_hint = true, + }; + INIT_LIST_HEAD(&cc.migratepages); + /* * What we do here is we mark all pageblocks in range as * MIGRATE_ISOLATE. 
Because pageblock and max order pages may @@ -5832,7 +5834,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, if (ret) goto done; - ret = __alloc_contig_migrate_range(start, end); + ret = __alloc_contig_migrate_range(&cc, start, end); if (ret) goto done; @@ -5881,7 +5883,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start); /* Grab isolated pages from freelists. */ - outer_end = isolate_freepages_range(outer_start, end); + outer_end = isolate_freepages_range(&cc, outer_start, end); if (!outer_end) { ret = -EBUSY; goto done; -- cgit v1.2.3 From 62997027ca5b3d4618198ed8b1aba40b61b1137b Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 8 Oct 2012 16:32:47 -0700 Subject: mm: compaction: clear PG_migrate_skip based on compaction and reclaim activity Compaction caches if a pageblock was scanned and no pages were isolated so that the pageblocks can be skipped in the future to reduce scanning. This information is not cleared by the page allocator based on activity due to the impact it would have on the page allocator fast paths. Hence there is a requirement that something clear the cache or pageblocks will be skipped forever. Currently the cache is cleared if there were a number of recent allocation failures and it has not been cleared within the last 5 seconds. Time-based decisions like this are terrible as they have no relationship to VM activity and are basically a big hammer. Unfortunately, accurate heuristics would add cost to some hot paths so this patch implements a rough heuristic. There are two cases where the cache is cleared:

1. If a !kswapd process completes a compaction cycle (the migrate and free scanners meet), the zone is marked compact_blockskip_flush. When kswapd goes to sleep, it will clear the cache. This is expected to be the common case where the cache is cleared. It does not really matter if kswapd happens to be asleep or going to sleep when the flag is set, as it will be woken on the next allocation request.

2. If there have been multiple failures recently and compaction just finished being deferred then a process will clear the cache and start a full scan. This situation happens if there are multiple high-order allocation requests under heavy memory pressure.

The clearing of the PG_migrate_skip bits and other scans is inherently racy but the race is harmless. Allocations that can fail, such as THP, will simply fail. Requests that cannot fail will retry the allocation. Tests indicated that scanning rates were roughly similar to when the time-based heuristic was used and that allocation success rates were similar.
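To make the two triggers concrete, here is a minimal userspace sketch of the decision logic described above. compaction_restarting() mirrors the helper added by the patch below, but the stub struct, the kswapd helper and main() are simplified assumptions for illustration, not kernel code:

/* Userspace model of the two cache-clearing triggers; stand-ins only. */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

struct zone_model {
    bool compact_blockskip_flush;     /* raised when a !kswapd cycle completes */
    unsigned int compact_defer_shift;
    unsigned long compact_considered;
    int compact_order_failed;
};

/* Case 2: compaction is being retried after the maximum deferral period */
static bool compaction_restarting(struct zone_model *z, int order)
{
    if (order < z->compact_order_failed)
        return false;

    return z->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
           z->compact_considered >= 1UL << z->compact_defer_shift;
}

/* Case 1: kswapd flushes the skip hints when it goes to sleep */
static void kswapd_try_to_sleep_model(struct zone_model *z)
{
    if (z->compact_blockskip_flush) {
        /* the kernel walks the zone clearing PG_migrate_skip here */
        z->compact_blockskip_flush = false;
        printf("skip hints cleared by kswapd\n");
    }
}

int main(void)
{
    struct zone_model z = {
        .compact_blockskip_flush = true,
        .compact_defer_shift = COMPACT_MAX_DEFER_SHIFT,
        .compact_considered = 1UL << COMPACT_MAX_DEFER_SHIFT,
        .compact_order_failed = 2,
    };

    printf("restart full scan at order 9? %d\n", compaction_restarting(&z, 9));
    kswapd_try_to_sleep_model(&z);
    return 0;
}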
Signed-off-by: Mel Gorman Cc: Rik van Riel Cc: Richard Davies Cc: Shaohua Li Cc: Avi Kivity Cc: Rafael Aquini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 15 ++++++++++++++ include/linux/mmzone.h | 3 ++- mm/compaction.c | 50 +++++++++++++++++++++++++++++++--------------- mm/page_alloc.c | 1 + mm/vmscan.c | 8 ++++++++ 5 files changed, 60 insertions(+), 17 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 0e38a1deeb23..6ecb6dc2f303 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -24,6 +24,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, bool sync, bool *contended, struct page **page); extern int compact_pgdat(pg_data_t *pgdat, int order); +extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); /* Do not skip compaction more than 64 times */ @@ -61,6 +62,16 @@ static inline bool compaction_deferred(struct zone *zone, int order) return zone->compact_considered < defer_limit; } +/* Returns true if restarting compaction after many failures */ +static inline bool compaction_restarting(struct zone *zone, int order) +{ + if (order < zone->compact_order_failed) + return false; + + return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && + zone->compact_considered >= 1UL << zone->compact_defer_shift; +} + #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, @@ -74,6 +85,10 @@ static inline int compact_pgdat(pg_data_t *pgdat, int order) return COMPACT_CONTINUE; } +static inline void reset_isolation_suitable(pg_data_t *pgdat) +{ +} + static inline unsigned long compaction_suitable(struct zone *zone, int order) { return COMPACT_SKIPPED; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c8b3abc97a1e..d240efa8f846 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -370,7 +370,8 @@ struct zone { spinlock_t lock; int all_unreclaimable; /* All pages pinned */ #if defined CONFIG_COMPACTION || defined CONFIG_CMA - unsigned long compact_blockskip_expire; + /* Set to true when the PG_migrate_skip bits should be cleared */ + bool compact_blockskip_flush; /* pfns where compaction scanners should start */ unsigned long compact_cached_free_pfn; diff --git a/mm/compaction.c b/mm/compaction.c index f94cbc0b99a5..d8187f9cabbf 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -66,24 +66,15 @@ static inline bool isolation_suitable(struct compact_control *cc, * should be skipped for page isolation when the migrate and free page scanner * meet. */ -static void reset_isolation_suitable(struct zone *zone) +static void __reset_isolation_suitable(struct zone *zone) { unsigned long start_pfn = zone->zone_start_pfn; unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages; unsigned long pfn; - /* - * Do not reset more than once every five seconds. If allocations are - * failing sufficiently quickly to allow this to happen then continually - * scanning for compaction is not going to help. The choice of five - * seconds is arbitrary but will mitigate excessive scanning. 
- */ - if (time_before(jiffies, zone->compact_blockskip_expire)) - return; - zone->compact_cached_migrate_pfn = start_pfn; zone->compact_cached_free_pfn = end_pfn; - zone->compact_blockskip_expire = jiffies + (HZ * 5); + zone->compact_blockskip_flush = false; /* Walk the zone and mark every pageblock as suitable for isolation */ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { @@ -102,9 +93,24 @@ static void reset_isolation_suitable(struct zone *zone) } } +void reset_isolation_suitable(pg_data_t *pgdat) +{ + int zoneid; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + /* Only flush if a full compaction finished recently */ + if (zone->compact_blockskip_flush) + __reset_isolation_suitable(zone); + } +} + /* * If no pages were isolated then mark this pageblock to be skipped in the - * future. The information is later cleared by reset_isolation_suitable(). + * future. The information is later cleared by __reset_isolation_suitable(). */ static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long nr_isolated, @@ -820,7 +826,15 @@ static int compact_finished(struct zone *zone, /* Compaction run completes if the migrate and free scanner meet */ if (cc->free_pfn <= cc->migrate_pfn) { - reset_isolation_suitable(cc->zone); + /* + * Mark that the PG_migrate_skip information should be cleared + * by kswapd when it goes to sleep. kswapd does not set the + * flag itself as the decision to be clear should be directly + * based on an allocation request. + */ + if (!current_is_kswapd()) + zone->compact_blockskip_flush = true; + return COMPACT_COMPLETE; } @@ -943,9 +957,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) zone->compact_cached_migrate_pfn = cc->migrate_pfn; } - /* Clear pageblock skip if there are numerous alloc failures */ - if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT) - reset_isolation_suitable(zone); + /* + * Clear pageblock skip if there were failures recently and compaction + * is about to be retried after being deferred. kswapd does not do + * this reset as it'll reset the cached information when going to sleep. + */ + if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) + __reset_isolation_suitable(zone); migrate_prep_local(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 44c56049edf9..b97cf12f07a9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2172,6 +2172,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, preferred_zone, migratetype); if (page) { got_page: + preferred_zone->compact_blockskip_flush = false; preferred_zone->compact_considered = 0; preferred_zone->compact_defer_shift = 0; if (order >= preferred_zone->compact_order_failed) diff --git a/mm/vmscan.c b/mm/vmscan.c index 1ee4b69a28a5..b010efc43891 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2895,6 +2895,14 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) */ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); + /* + * Compaction records what page blocks it recently failed to + * isolate pages from and skips them in the future scanning. + * When kswapd is going to sleep, it is reasonable to assume + * that pages and compaction may succeed so reset the cache. 
+ */ + reset_isolation_suitable(pgdat); + if (!kthread_should_stop()) schedule(); -- cgit v1.2.3 From 723a0644a7255f532575fd43245f9ef976491328 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:32:52 -0700 Subject: mm/page_alloc: refactor out __alloc_contig_migrate_alloc() __alloc_contig_migrate_alloc() can be used by memory-hotplug so refactor it out (move it and give it a common name) into page_isolation.c. [akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: Minchan Kim Cc: Kamezawa Hiroyuki Reviewed-by: Yasuaki Ishimatsu Acked-by: Michal Nazarewicz Cc: Marek Szyprowski Cc: Wen Congyang Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-isolation.h | 3 ++- mm/page_alloc.c | 14 +------------- mm/page_isolation.c | 11 +++++++++++ 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index fca8c0a5c188..76a9539cfd3f 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -41,6 +41,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn); */ int set_migratetype_isolate(struct page *page); void unset_migratetype_isolate(struct page *page, unsigned migratetype); - +struct page *alloc_migrate_target(struct page *page, unsigned long private, + int **resultp); #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b97cf12f07a9..8ac593893e6e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5667,18 +5667,6 @@ static unsigned long pfn_max_align_up(unsigned long pfn) pageblock_nr_pages)); } -static struct page * -__alloc_contig_migrate_alloc(struct page *page, unsigned long private, - int **resultp) -{ - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; - - if (PageHighMem(page)) - gfp_mask |= __GFP_HIGHMEM; - - return alloc_page(gfp_mask); -} - /* [start, end) must belong to a single zone. */ static int __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end) @@ -5714,7 +5702,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, reclaim_clean_pages_from_list(cc->zone, &cc->migratepages); ret = migrate_pages(&cc->migratepages, - __alloc_contig_migrate_alloc, + alloc_migrate_target, 0, false, MIGRATE_SYNC); } diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 5f34a9053ce0..f2f5b4818e94 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -255,3 +255,14 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) spin_unlock_irqrestore(&zone->lock, flags); return ret ? 0 : -EBUSY; } + +struct page *alloc_migrate_target(struct page *page, unsigned long private, + int **resultp) +{ + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; + + if (PageHighMem(page)) + gfp_mask |= __GFP_HIGHMEM; + + return alloc_page(gfp_mask); +} -- cgit v1.2.3 From 7f1290f2f2a4d2c3f1b7ce8e87256e052ca23125 Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Mon, 8 Oct 2012 16:33:06 -0700 Subject: mm: fix-up zone present pages I think zone->present_pages should indicate the pages that the buddy system can manage. It should be:

zone->present_pages = spanned pages - absent pages - bootmem pages,

but it is currently:

zone->present_pages = spanned pages - absent pages - memmap pages.

spanned pages: total size, including holes.
absent pages: holes.
bootmem pages: pages used during system boot, managed by the bootmem allocator.
memmap pages: pages used by page structs.

This may cause zone->present_pages to be less than it should be.
For example, NUMA node 1 has ZONE_NORMAL and ZONE_MOVABLE; its memmap and other bootmem are allocated from ZONE_MOVABLE, so ZONE_NORMAL's present_pages should be spanned pages - absent pages, but it currently also subtracts memmap pages (free_area_init_core()), which are actually allocated from ZONE_MOVABLE. When offlining all memory of a zone, this can drive zone->present_pages below 0; because present_pages is an unsigned long, it actually wraps to a very large integer. That indirectly causes zone->watermark[WMARK_MIN] to become a large integer (setup_per_zone_wmarks()), then causes totalreserve_pages to become a large integer (calculate_totalreserve_pages()), and finally causes memory allocation to fail when forking a process (__vm_enough_memory()).

[root@localhost ~]# dmesg
-bash: fork: Cannot allocate memory

I think the bug described in http://marc.info/?l=linux-mm&m=134502182714186&w=2 is also caused by wrong zone present pages. This patch fixes up zone->present_pages when memory is freed to the buddy system on x86_64 and IA64 platforms. Signed-off-by: Jianguo Wu Signed-off-by: Jiang Liu Reported-by: Petr Tesarik Tested-by: Petr Tesarik Cc: "Luck, Tony" Cc: Mel Gorman Cc: Yinghai Lu Cc: Minchan Kim Cc: Johannes Weiner Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/mm/init.c | 1 + include/linux/mm.h | 4 ++++ mm/bootmem.c | 10 +++++++++- mm/memory_hotplug.c | 7 +++++++ mm/nobootmem.c | 3 +++ mm/page_alloc.c | 34 ++++++++++++++++++++++++++++++++++ 6 files changed, 58 insertions(+), 1 deletion(-) (limited to 'mm/page_alloc.c') diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 082e383c1b6f..acd5b68e8871 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -637,6 +637,7 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); + reset_zone_present_pages(); for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); diff --git a/include/linux/mm.h b/include/linux/mm.h index bcaab4e6fe91..fa0680402738 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1684,5 +1684,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } static inline bool page_is_guard(struct page *page) { return false; } #endif /* CONFIG_DEBUG_PAGEALLOC */ +extern void reset_zone_present_pages(void); +extern void fixup_zone_present_pages(int nid, unsigned long start_pfn, + unsigned long end_pfn); + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/mm/bootmem.c b/mm/bootmem.c index f468185b3b28..434be4ae7a04 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -198,6 +198,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) int order = ilog2(BITS_PER_LONG); __free_pages_bootmem(pfn_to_page(start), order); + fixup_zone_present_pages(page_to_nid(pfn_to_page(start)), + start, start + BITS_PER_LONG); count += BITS_PER_LONG; start += BITS_PER_LONG; } else { @@ -208,6 +210,9 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) if (vec & 1) { page = pfn_to_page(start + off); __free_pages_bootmem(page, 0); + fixup_zone_present_pages( + page_to_nid(page), + start + off, start + off + 1); count++; } vec >>= 1; @@ -221,8 +226,11 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) pages = bdata->node_low_pfn - bdata->node_min_pfn; pages = bootmem_bootmap_pages(pages); count += pages; - while (pages--) + while (pages--) { + fixup_zone_present_pages(page_to_nid(page), + page_to_pfn(page), page_to_pfn(page) + 1);
__free_pages_bootmem(page++, 0); + } bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f9ac0955e10a..ce690a911f1b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -106,6 +106,7 @@ static void get_page_bootmem(unsigned long info, struct page *page, void __ref put_page_bootmem(struct page *page) { unsigned long type; + struct zone *zone; type = (unsigned long) page->lru.next; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || @@ -116,6 +117,12 @@ void __ref put_page_bootmem(struct page *page) set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); __free_pages_bootmem(page, 0); + + zone = page_zone(page); + zone_span_writelock(zone); + zone->present_pages++; + zone_span_writeunlock(zone); + totalram_pages++; } } diff --git a/mm/nobootmem.c b/mm/nobootmem.c index bd82f6b31411..714d5d650470 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -116,6 +116,8 @@ static unsigned long __init __free_memory_core(phys_addr_t start, return 0; __free_pages_memory(start_pfn, end_pfn); + fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT), + start_pfn, end_pfn); return end_pfn - start_pfn; } @@ -126,6 +128,7 @@ unsigned long __init free_low_memory_core_early(int nodeid) phys_addr_t start, end, size; u64 i; + reset_zone_present_pages(); for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) count += __free_memory_core(start, end); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8ac593893e6e..00750bc08a3a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6087,3 +6087,37 @@ void dump_page(struct page *page) dump_page_flags(page->flags); mem_cgroup_print_bad_page(page); } + +/* reset zone->present_pages */ +void reset_zone_present_pages(void) +{ + struct zone *z; + int i, nid; + + for_each_node_state(nid, N_HIGH_MEMORY) { + for (i = 0; i < MAX_NR_ZONES; i++) { + z = NODE_DATA(nid)->node_zones + i; + z->present_pages = 0; + } + } +} + +/* calculate zone's present pages in buddy system */ +void fixup_zone_present_pages(int nid, unsigned long start_pfn, + unsigned long end_pfn) +{ + struct zone *z; + unsigned long zone_start_pfn, zone_end_pfn; + int i; + + for (i = 0; i < MAX_NR_ZONES; i++) { + z = NODE_DATA(nid)->node_zones + i; + zone_start_pfn = z->zone_start_pfn; + zone_end_pfn = zone_start_pfn + z->spanned_pages; + + /* if the two regions intersect */ + if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn)) + z->present_pages += min(end_pfn, zone_end_pfn) - + max(start_pfn, zone_start_pfn); + } +} -- cgit v1.2.3 From a0c5e813f087dffc0d9b173d2e7d3328b1482fd5 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 8 Oct 2012 16:33:21 -0700 Subject: mm: remove free_page_mlock We should not be seeing non-0 unevictable_pgs_mlockfreed any longer. So remove free_page_mlock() from the page freeing paths: __PG_MLOCKED is already in PAGE_FLAGS_CHECK_AT_FREE, so free_pages_check() will now be checking it, reporting "BUG: Bad page state" if it's ever found set. Comment UNEVICTABLE_MLOCKFREED and unevictable_pgs_mlockfreed always 0. 
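To illustrate the mechanism the log describes, here is a small userspace model of how folding a flag into the check-at-free mask turns a dedicated cleanup path into a "Bad page state" report. The flag values, the struct and main() are invented for the example and are not the kernel's definitions:

/* Illustrative model only: PG_MLOCKED folded into the free-time check. */
#include <stdio.h>

#define PG_LOCKED   (1UL << 0)
#define PG_LRU      (1UL << 1)
#define PG_MLOCKED  (1UL << 2)

/* With PG_MLOCKED in the mask, no separate free_page_mlock() is needed */
#define FLAGS_CHECK_AT_FREE (PG_LOCKED | PG_LRU | PG_MLOCKED)

struct page_model {
    unsigned long flags;
};

static int free_pages_check_model(struct page_model *page)
{
    if (page->flags & FLAGS_CHECK_AT_FREE) {
        fprintf(stderr, "BUG: Bad page state, flags=%#lx\n", page->flags);
        return 1;   /* refuse to free the page */
    }
    return 0;
}

int main(void)
{
    struct page_model ok = { .flags = 0 };
    struct page_model bad = { .flags = PG_MLOCKED };

    printf("clean page rejected? %d\n", free_pages_check_model(&ok));
    printf("mlocked page rejected? %d\n", free_pages_check_model(&bad));
    return 0;
}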
Signed-off-by: Hugh Dickins Acked-by: Mel Gorman Cc: Rik van Riel Cc: Johannes Weiner Cc: Michel Lespinasse Cc: Ying Han Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 2 +- mm/page_alloc.c | 17 ----------------- mm/vmstat.c | 2 +- 3 files changed, 2 insertions(+), 19 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 57f7b1091511..ede4ddd46695 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -52,7 +52,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, UNEVICTABLE_PGMUNLOCKED, UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ - UNEVICTABLE_MLOCKFREED, + UNEVICTABLE_MLOCKFREED, /* no longer useful: always zero */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE THP_FAULT_ALLOC, THP_FAULT_FALLBACK, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 00750bc08a3a..dbb53866c3aa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -598,17 +598,6 @@ out: zone->free_area[order].nr_free++; } -/* - * free_page_mlock() -- clean up attempts to free and mlocked() page. - * Page should not be on lru, so no need to fix that up. - * free_pages_check() will verify... - */ -static inline void free_page_mlock(struct page *page) -{ - __dec_zone_page_state(page, NR_MLOCK); - __count_vm_event(UNEVICTABLE_MLOCKFREED); -} - static inline int free_pages_check(struct page *page) { if (unlikely(page_mapcount(page) | @@ -728,15 +717,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order) static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; - int wasMlocked = __TestClearPageMlocked(page); int migratetype; if (!free_pages_prepare(page, order)) return; local_irq_save(flags); - if (unlikely(wasMlocked)) - free_page_mlock(page); __count_vm_events(PGFREE, 1 << order); migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); @@ -1310,7 +1296,6 @@ void free_hot_cold_page(struct page *page, int cold) struct per_cpu_pages *pcp; unsigned long flags; int migratetype; - int wasMlocked = __TestClearPageMlocked(page); if (!free_pages_prepare(page, 0)) return; @@ -1318,8 +1303,6 @@ void free_hot_cold_page(struct page *page, int cold) migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); local_irq_save(flags); - if (unlikely(wasMlocked)) - free_page_mlock(page); __count_vm_event(PGFREE); /* diff --git a/mm/vmstat.c b/mm/vmstat.c index acbd85c983e6..05e3a9913744 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -782,7 +782,7 @@ const char * const vmstat_text[] = { "unevictable_pgs_munlocked", "unevictable_pgs_cleared", "unevictable_pgs_stranded", - "unevictable_pgs_mlockfreed", + "unevictable_pgs_mlockfreed", /* no longer useful: always zero */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE "thp_fault_alloc", -- cgit v1.2.3 From 957f822a0ab95e88b146638bad6209bbc315bedd Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 8 Oct 2012 16:33:24 -0700 Subject: mm, numa: reclaim from all nodes within reclaim distance RECLAIM_DISTANCE represents the distance between nodes at which it is deemed too costly to allocate from; it's preferred to try to reclaim from a local zone before falling back to allocating on a remote node with such a distance. To do this, zone_reclaim_mode is set if the distance between any two nodes on the system is greater than this distance.
This, however, ends up causing the page allocator to reclaim from every zone regardless of its affinity. What we really want is to reclaim only from zones that are closer than RECLAIM_DISTANCE. This patch adds a nodemask to each node that represents the set of nodes that are within this distance. During the zone iteration, if the bit for a zone's node is set for the local node, then reclaim is attempted; otherwise, the zone is skipped. [akpm@linux-foundation.org: fix CONFIG_NUMA=n build] Signed-off-by: David Rientjes Cc: Mel Gorman Cc: Minchan Kim Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 + mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++----------- 2 files changed, 31 insertions(+), 11 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d240efa8f846..a5578871d033 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -709,6 +709,7 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; + nodemask_t reclaim_nodes; /* Nodes allowed to reclaim from */ wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dbb53866c3aa..9b8e6243a524 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1799,6 +1799,22 @@ static void zlc_clear_zones_full(struct zonelist *zonelist) bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); } +static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) +{ + return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes); +} + +static void __paginginit init_zone_allows_reclaim(int nid) +{ + int i; + + for_each_online_node(i) + if (node_distance(nid, i) <= RECLAIM_DISTANCE) { + node_set(i, NODE_DATA(nid)->reclaim_nodes); + zone_reclaim_mode = 1; + } +} + #else /* CONFIG_NUMA */ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) @@ -1819,6 +1835,15 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) static void zlc_clear_zones_full(struct zonelist *zonelist) { } + +static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) +{ + return true; +} + +static inline void init_zone_allows_reclaim(int nid) +{ +} #endif /* CONFIG_NUMA */ /* @@ -1903,7 +1928,8 @@ zonelist_scan: did_zlc_setup = 1; } - if (zone_reclaim_mode == 0) + if (zone_reclaim_mode == 0 || + !zone_allows_reclaim(preferred_zone, zone)) goto this_zone_full; /* @@ -3364,21 +3390,13 @@ static void build_zonelists(pg_data_t *pgdat) j = 0; while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { - int distance = node_distance(local_node, node); - - /* - * If another node is sufficiently far away then it is better - * to reclaim pages in a zone before going off node. - */ - if (distance > RECLAIM_DISTANCE) - zone_reclaim_mode = 1; - /* * We don't want to pressure a particular node. * So adding penalty to the first node in same * distance group to make it round-robin. 
*/ - if (distance != node_distance(local_node, prev_node)) + if (node_distance(local_node, node) != + node_distance(local_node, prev_node)) node_load[node] = load; prev_node = node; @@ -4552,6 +4570,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; + init_zone_allows_reclaim(nid); calculate_node_totalpages(pgdat, zones_size, zholes_size); alloc_node_mem_map(pgdat); -- cgit v1.2.3 From 5a883813845a2bb5ed2bd8c9240736c0740b156f Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:33:39 -0700 Subject: memory-hotplug: fix zone stat mismatch During memory-hotplug, I found NR_ISOLATED_[ANON|FILE] are increasing, causing the kernel to hang. When the system doesn't have enough free pages, it enters reclaim but never reclaims any pages due to too_many_isolated()==true and loops forever. The cause is that when we do memory-hotadd after memory-remove, __zone_pcp_update() clears a zone's ZONE_STAT_ITEMS in setup_pageset() although the per-CPU vm_stat_diff counters still hold values. In addition, when we offline all pages of the zone, we reset them in zone_pcp_reset() without draining, so we lose some zone stat items. Reviewed-by: Wen Congyang Signed-off-by: Minchan Kim Cc: Kamezawa Hiroyuki Cc: Yasuaki Ishimatsu Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 4 ++++ mm/page_alloc.c | 7 +++++++ mm/vmstat.c | 12 ++++++++++++ 3 files changed, 23 insertions(+) (limited to 'mm/page_alloc.c') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index a5bb15018b5c..92a86b2cce33 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -198,6 +198,8 @@ extern void __dec_zone_state(struct zone *, enum zone_stat_item); void refresh_cpu_vm_stats(int); void refresh_zone_stat_thresholds(void); +void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); + int calculate_pressure_threshold(struct zone *zone); int calculate_normal_threshold(struct zone *zone); void set_pgdat_percpu_threshold(pg_data_t *pgdat, @@ -251,6 +253,8 @@ static inline void __dec_zone_page_state(struct page *page, static inline void refresh_cpu_vm_stats(int cpu) { } static inline void refresh_zone_stat_thresholds(void) { } +static inline void drain_zonestat(struct zone *zone, + struct per_cpu_pageset *pset) { } #endif /* CONFIG_SMP */ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9b8e6243a524..5485f0ef4ec3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5916,6 +5916,7 @@ static int __meminit __zone_pcp_update(void *data) local_irq_save(flags); if (pcp->count > 0) free_pcppages_bulk(zone, pcp->count, pcp); + drain_zonestat(zone, pset); setup_pageset(pset, batch); local_irq_restore(flags); } @@ -5932,10 +5933,16 @@ void __meminit zone_pcp_update(struct zone *zone) void zone_pcp_reset(struct zone *zone) { unsigned long flags; + int cpu; + struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ local_irq_save(flags); if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); + drain_zonestat(zone, pset); + } free_percpu(zone->pageset); zone->pageset = &boot_pageset; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 05e3a9913744..2f11309955cc 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -495,6 +495,18 @@ void refresh_cpu_vm_stats(int cpu) atomic_long_add(global_diff[i], &vm_stat[i]); } +void drain_zonestat(struct zone *zone,
struct per_cpu_pageset *pset) +{ + int i; + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (pset->vm_stat_diff[i]) { + int v = pset->vm_stat_diff[i]; + pset->vm_stat_diff[i] = 0; + atomic_long_add(v, &zone->vm_stat[i]); + atomic_long_add(v, &vm_stat[i]); + } +} #endif #ifdef CONFIG_NUMA -- cgit v1.2.3 From e46a28790e594c0876d1a84270926abf75460f61 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:33:48 -0700 Subject: CMA: migrate mlocked pages Presently CMA cannot migrate mlocked pages so it ends up failing to allocate contiguous memory space. This patch makes mlocked pages be migrated out. Of course, it can affect realtime processes but in CMA usecase, contiguous memory allocation failing is far worse than access latency to an mlocked page being variable while CMA is running. If someone wants to make the system realtime, he shouldn't enable CMA because stalls can still happen at random times. [akpm@linux-foundation.org: tweak comment text, per Mel] Signed-off-by: Minchan Kim Acked-by: Mel Gorman Cc: Michal Nazarewicz Cc: Bartlomiej Zolnierkiewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 2 ++ mm/compaction.c | 8 ++++++-- mm/internal.h | 2 +- mm/page_alloc.c | 2 +- mm/vmscan.c | 4 ++-- 5 files changed, 12 insertions(+), 6 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a5578871d033..50aaca81f63d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -218,6 +218,8 @@ struct lruvec { #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) +/* Isolate unevictable pages */ +#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) /* LRU Isolation modes. */ typedef unsigned __bitwise__ isolate_mode_t; diff --git a/mm/compaction.c b/mm/compaction.c index d8187f9cabbf..2c4ce17651d8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -461,6 +461,7 @@ static bool too_many_isolated(struct zone *zone) * @cc: Compaction control structure. * @low_pfn: The first PFN of the range. * @end_pfn: The one-past-the-last PFN of the range. + * @unevictable: true if it allows to isolate unevictable pages * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). 
Returns zero if there is a fatal signal @@ -476,7 +477,7 @@ static bool too_many_isolated(struct zone *zone) */ unsigned long isolate_migratepages_range(struct zone *zone, struct compact_control *cc, - unsigned long low_pfn, unsigned long end_pfn) + unsigned long low_pfn, unsigned long end_pfn, bool unevictable) { unsigned long last_pageblock_nr = 0, pageblock_nr; unsigned long nr_scanned = 0, nr_isolated = 0; @@ -602,6 +603,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (!cc->sync) mode |= ISOLATE_ASYNC_MIGRATE; + if (unevictable) + mode |= ISOLATE_UNEVICTABLE; + lruvec = mem_cgroup_page_lruvec(page, zone); /* Try isolate the page */ @@ -807,7 +811,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, } /* Perform the isolation */ - low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn); + low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false); if (!low_pfn || cc->contended) return ISOLATE_ABORT; diff --git a/mm/internal.h b/mm/internal.h index 4dc93e2fe69e..f5f295fe11e1 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -138,7 +138,7 @@ isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn); unsigned long isolate_migratepages_range(struct zone *zone, struct compact_control *cc, - unsigned long low_pfn, unsigned long end_pfn); + unsigned long low_pfn, unsigned long end_pfn, bool unevictable); #endif diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5485f0ef4ec3..fd86c47de86f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5690,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, if (list_empty(&cc->migratepages)) { cc->nr_migratepages = 0; pfn = isolate_migratepages_range(cc->zone, cc, - pfn, end); + pfn, end, true); if (!pfn) { ret = -EINTR; break; diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b627309dd44..2624edcfb420 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1009,8 +1009,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) if (!PageLRU(page)) return ret; - /* Do not give back unevictable pages for compaction */ - if (PageUnevictable(page)) + /* Compaction should not handle unevictable pages but CMA can do so */ + if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) return ret; ret = -EBUSY; -- cgit v1.2.3 From beb51eaa88238daba698ad837222ad277d440b6d Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 8 Oct 2012 16:33:51 -0700 Subject: cma: decrease cc.nr_migratepages after reclaiming pagelist reclaim_clean_pages_from_list() reclaims clean pages before migration so cc.nr_migratepages should be updated. Currently, there is no problem but it can be wrong if we try to use the value in future. Signed-off-by: Minchan Kim Acked-by: Mel Gorman Cc: Michal Nazarewicz Cc: Bartlomiej Zolnierkiewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fd86c47de86f..bb90971182bd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5674,7 +5674,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end) { /* This function is based on compact_zone() from compaction.c. 
*/ - + unsigned long nr_reclaimed; unsigned long pfn = start; unsigned int tries = 0; int ret = 0; @@ -5701,7 +5701,9 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, break; } - reclaim_clean_pages_from_list(cc->zone, &cc->migratepages); + nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, + &cc->migratepages); + cc->nr_migratepages -= nr_reclaimed; ret = migrate_pages(&cc->migratepages, alloc_migrate_target, -- cgit v1.2.3 From 86a595f961360c9c31d00fb111dd09e5d5ba9a40 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Thu, 25 Oct 2012 13:37:56 -0700 Subject: mm/page_alloc.c:alloc_contig_range(): return early for err path If start_isolate_page_range() fails, unset_migratetype_isolate() has already been called inside it, so return the error directly rather than jumping to the cleanup path. Signed-off-by: Bob Liu Cc: Ni zhan Chen Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bb90971182bd..b0012ab372a4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, ret = start_isolate_page_range(pfn_max_align_down(start), pfn_max_align_up(end), migratetype); if (ret) - goto done; + return ret; ret = __alloc_contig_migrate_range(&cc, start, end); if (ret) goto done; -- cgit v1.2.3 From 6b187d0260b6cd1d0904309f32659b7ed5948af8 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 25 Oct 2012 13:38:08 -0700 Subject: mm, numa: avoid setting zone_reclaim_mode unless a node is sufficiently distant Commit 957f822a0ab9 ("mm, numa: reclaim from all nodes within reclaim distance") caused zone_reclaim_mode to be set for all systems where two nodes are within RECLAIM_DISTANCE of each other. This is the opposite of what we actually want: zone_reclaim_mode should be set if two nodes are sufficiently distant. Signed-off-by: David Rientjes Reported-by: Julian Wollrath Tested-by: Julian Wollrath Cc: Hugh Dickins Cc: Patrik Kullman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/page_alloc.c') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b0012ab372a4..5b74de6702e0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid) int i; for_each_online_node(i) - if (node_distance(nid, i) <= RECLAIM_DISTANCE) { + if (node_distance(nid, i) <= RECLAIM_DISTANCE) node_set(i, NODE_DATA(nid)->reclaim_nodes); + else zone_reclaim_mode = 1; - } } #else /* CONFIG_NUMA */ -- cgit v1.2.3
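As a closing illustration, here is a toy userspace model of the corrected init_zone_allows_reclaim() logic: nearby nodes are recorded in reclaim_nodes, while zone_reclaim_mode is enabled only when some node is too distant. The distance table, the bitmask standing in for a nodemask and main() are all invented for the example:

/* Toy model of the fixed logic; distances and types are made up. */
#include <stdio.h>

#define RECLAIM_DISTANCE 30
#define NR_NODES 4

/* node_distance() stand-in: node 3 is remote from everyone else */
static const int distance[NR_NODES][NR_NODES] = {
    { 10, 20, 20, 40 },
    { 20, 10, 20, 40 },
    { 20, 20, 10, 40 },
    { 40, 40, 40, 10 },
};

static int zone_reclaim_mode;

static unsigned long init_zone_allows_reclaim_model(int nid)
{
    unsigned long reclaim_nodes = 0;
    int i;

    for (i = 0; i < NR_NODES; i++) {
        if (distance[nid][i] <= RECLAIM_DISTANCE)
            reclaim_nodes |= 1UL << i;  /* close enough: reclaim allowed */
        else
            zone_reclaim_mode = 1;      /* a distant node exists */
    }
    return reclaim_nodes;
}

int main(void)
{
    unsigned long mask = init_zone_allows_reclaim_model(0);

    printf("node 0 reclaim_nodes=%#lx zone_reclaim_mode=%d\n",
           mask, zone_reclaim_mode);
    return 0;
}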