author		Andrew Morton <akpm@linux-foundation.org>	2015-01-17 11:23:48 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2015-01-17 11:23:48 +1100
commit		25a23098055050b3f8e3e22fe4167869ab4d8138 (patch)
tree		184663072a12244d10acf5a11dad7361b637bd6a /mm/vmscan.c
parent		3611badc1baa6189e10cb66f58abc016cf51469b (diff)
mm-vmscan-fix-the-page-state-calculation-in-too_many_isolated-fix
Move the zone_page_state_snapshot() fallback logic into too_many_isolated()
so that shrink_inactive_list() doesn't incorrectly call congestion_wait().

Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
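For context: the body of __too_many_isolated() is not shown in the diff
below. Based on the parent patch this -fix amends, the 'safe' flag is
expected to select between the cheap and the precise counter readers,
roughly like this (a sketch, not this patch's code; the anon branch and the
GFP_NOIO/GFP_NOFS isolation allowance are abbreviated):

    static int __too_many_isolated(struct zone *zone, int file,
                                   struct scan_control *sc, int safe)
    {
        unsigned long inactive, isolated;

        if (file) {
            if (safe) {
                /* Exact: folds per-cpu vm_stat_diff[] into the read. */
                inactive = zone_page_state_snapshot(zone, NR_INACTIVE_FILE);
                isolated = zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
            } else {
                /* Cheap: global counter only; may lag per-cpu deltas. */
                inactive = zone_page_state(zone, NR_INACTIVE_FILE);
                isolated = zone_page_state(zone, NR_ISOLATED_FILE);
            }
        } else {
            /* Same pattern with NR_INACTIVE_ANON / NR_ISOLATED_ANON. */
        }

        /* GFP_NOIO/GFP_NOFS callers get a larger allowance here. */

        return isolated > inactive;
    }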
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 47e42bf1dc5d..175a145bd73a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1402,7 +1402,7 @@ int isolate_lru_page(struct page *page)
}
static int __too_many_isolated(struct zone *zone, int file,
- struct scan_control *sc, int safe)
+ struct scan_control *sc, int safe)
{
unsigned long inactive, isolated;
@@ -1435,7 +1435,7 @@ static int __too_many_isolated(struct zone *zone, int file,
* unnecessary swapping, thrashing and OOM.
*/
static int too_many_isolated(struct zone *zone, int file,
- struct scan_control *sc, int safe)
+ struct scan_control *sc)
{
if (current_is_kswapd())
return 0;
@@ -1443,12 +1443,14 @@ static int too_many_isolated(struct zone *zone, int file,
if (!global_reclaim(sc))
return 0;
- if (unlikely(__too_many_isolated(zone, file, sc, 0))) {
- if (safe)
- return __too_many_isolated(zone, file, sc, safe);
- else
- return 1;
- }
+ /*
+ * __too_many_isolated(safe=0) is fast but inaccurate, because it
+ * doesn't account for the vm_stat_diff[] counters. So if it looks
+ * like too_many_isolated() is about to return true, fall back to the
+ * slower, more accurate zone_page_state_snapshot().
+ */
+ if (unlikely(__too_many_isolated(zone, file, sc, 0)))
+ return __too_many_isolated(zone, file, sc, 1);
return 0;
}
@@ -1540,18 +1542,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
unsigned long nr_immediate = 0;
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
- int safe = 0;
struct zone *zone = lruvec_zone(lruvec);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
- while (unlikely(too_many_isolated(zone, file, sc, safe))) {
+ while (unlikely(too_many_isolated(zone, file, sc))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/* We are about to die and free our memory. Return now. */
if (fatal_signal_pending(current))
return SWAP_CLUSTER_MAX;
-
- safe = 1;
}
lru_add_drain();
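For reference, why the snapshot read is the slow-but-accurate one:
zone_page_state() reads only the global atomic counter, while
zone_page_state_snapshot() also folds in each CPU's not-yet-drained
vm_stat_diff[] delta. Approximately, per include/linux/vmstat.h of this
era (shown for context only; not part of this patch, and minor details
may differ):

    static inline unsigned long zone_page_state(struct zone *zone,
                                                enum zone_stat_item item)
    {
        long x = atomic_long_read(&zone->vm_stat[item]);
    #ifdef CONFIG_SMP
        if (x < 0)
            x = 0;
    #endif
        return x;
    }

    static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                                enum zone_stat_item item)
    {
        long x = atomic_long_read(&zone->vm_stat[item]);
    #ifdef CONFIG_SMP
        int cpu;
        /* Fold in per-cpu deltas not yet drained into vm_stat[]. */
        for_each_online_cpu(cpu)
            x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
        if (x < 0)
            x = 0;
    #endif
        return x;
    }

This is why the patch only pays for the snapshot when the cheap check
already suggests too_many_isolated() is about to return true.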