Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  81
1 file changed, 34 insertions(+), 47 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4add68d40e8d..f146478b01bc 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1365,12 +1365,14 @@ static bool suitable_migration_target(struct compact_control *cc,
{
/* If the page is a large free page, then disallow migration */
if (PageBuddy(page)) {
+ int order = cc->order > 0 ? cc->order : pageblock_order;
+
/*
* We are checking page_order without zone->lock taken. But
* the only small danger is that we skip a potentially suitable
* pageblock, so it's not worth checking the order for a valid range.
*/
- if (buddy_order_unsafe(page) >= pageblock_order)
+ if (buddy_order_unsafe(page) >= order)
return false;
}
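
The effect of this hunk is easiest to see in isolation: when a specific order is requested (cc->order > 0), a free buddy page now disqualifies a pageblock as a migration target as soon as it is large enough to satisfy that request itself, instead of only at pageblock_order. A minimal userspace sketch follows, assuming a typical pageblock_order of 9; all names are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's pageblock_order (9 on x86-64). */
#define PAGEBLOCK_ORDER 9

/* Simplified model of the new check in suitable_migration_target(). */
static bool suitable_migration_target(int buddy_order, int cc_order)
{
	int order = cc_order > 0 ? cc_order : PAGEBLOCK_ORDER;

	/*
	 * A free page at or above 'order' can serve the request directly,
	 * so migrating pages into its pageblock would be wasted work.
	 */
	return buddy_order < order;
}

int main(void)
{
	/* Order-4 free page, order-3 request: newly rejected as a target. */
	printf("%d\n", suitable_migration_target(4, 3));  /* 0 */
	/* Order-2 free page, order-3 request: still a suitable target. */
	printf("%d\n", suitable_migration_target(2, 3));  /* 1 */
	/* No specific order (-1): the old pageblock_order cutoff applies. */
	printf("%d\n", suitable_migration_target(4, -1)); /* 1 */
	return 0;
}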
@@ -1796,6 +1798,7 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
dst = list_entry(cc->freepages.next, struct folio, lru);
list_del(&dst->lru);
cc->nr_freepages--;
+ cc->nr_migratepages--;
return dst;
}
@@ -1811,6 +1814,7 @@ static void compaction_free(struct folio *dst, unsigned long data)
list_add(&dst->lru, &cc->freepages);
cc->nr_freepages++;
+ cc->nr_migratepages++;
}
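
The two one-line additions above pair up: compaction_alloc() consumes a destination page and counts one source page as handled, while compaction_free() returns the page and counts it back, so cc->nr_migratepages keeps tracking source pages that still need a target. A standalone sketch of that pairing, with a simplified struct standing in for the kernel's compact_control:

#include <stdio.h>

/* Simplified stand-in for the kernel's compact_control. */
struct compact_control {
	int nr_freepages;    /* destination pages available */
	int nr_migratepages; /* source pages still needing a target */
};

/* Hand out a destination page for one source page. */
static void compaction_alloc(struct compact_control *cc)
{
	cc->nr_freepages--;
	cc->nr_migratepages--; /* new: one fewer page awaiting migration */
}

/* Take back a destination page after a failed migration. */
static void compaction_free(struct compact_control *cc)
{
	cc->nr_freepages++;
	cc->nr_migratepages++; /* new: that page is pending again */
}

int main(void)
{
	struct compact_control cc = { .nr_freepages = 4, .nr_migratepages = 3 };

	compaction_alloc(&cc); /* first page gets a target */
	compaction_alloc(&cc); /* second page gets a target */
	compaction_free(&cc);  /* second migration failed, target returned */

	printf("free=%d pending=%d\n", cc.nr_freepages, cc.nr_migratepages);
	/* prints: free=3 pending=2 */
	return 0;
}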
/* possible outcome of isolate_migratepages */
@@ -2433,7 +2437,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
unsigned long last_migrated_pfn;
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
- unsigned int nr_succeeded = 0;
+ unsigned int nr_succeeded = 0, nr_migratepages;
/*
* These counters track activities during zone compaction. Initialize
@@ -2551,11 +2555,17 @@ rescan:
pageblock_start_pfn(cc->migrate_pfn - 1));
}
+ /*
+ * Record the number of pages to migrate, since
+ * compaction_alloc()/compaction_free() will update
+ * cc->nr_migratepages properly.
+ */
+ nr_migratepages = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
compaction_free, (unsigned long)cc, cc->mode,
MR_COMPACTION, &nr_succeeded);
- trace_mm_compaction_migratepages(cc, nr_succeeded);
+ trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded);
/* All pages were either migrated or will be released */
cc->nr_migratepages = 0;
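
Because the callbacks now mutate cc->nr_migratepages while migrate_pages() runs, the caller snapshots the value first so the tracepoint still reports the size of the batch it attempted. A small standalone illustration of why the snapshot is needed (mock names, not kernel code):

#include <stdio.h>

static int nr_migratepages; /* stands in for cc->nr_migratepages */

/* Mock of compaction_alloc(): consumes one pending page. */
static void alloc_cb(void)
{
	nr_migratepages--;
}

/* Mock of migrate_pages(): invokes the alloc callback once per page. */
static int fake_migrate_pages(int pages, int *nr_succeeded)
{
	for (int i = 0; i < pages; i++) {
		alloc_cb();
		(*nr_succeeded)++;
	}
	return 0;
}

int main(void)
{
	int nr_succeeded = 0;

	nr_migratepages = 5;

	/* Snapshot before the call, exactly as the hunk above does. */
	int batch = nr_migratepages;
	fake_migrate_pages(batch, &nr_succeeded);

	/* Trace with the snapshot; the live counter has been consumed to 0. */
	printf("migratepages=%d succeeded=%d live=%d\n",
	       batch, nr_succeeded, nr_migratepages);
	return 0;
}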
@@ -2788,25 +2798,27 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
}
/*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
+ * compact_node() - compact all zones within a node
+ * @pgdat: The node page data
+ * @proactive: Whether the compaction is proactive
*
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
+ * For proactive compaction, compact till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable). It is possible that the function returns before
+ * reaching score targets due to various back-off conditions, such as
+ * contention on per-node or per-zone locks.
*/
-static void proactive_compact_node(pg_data_t *pgdat)
+static void compact_node(pg_data_t *pgdat, bool proactive)
{
int zoneid;
struct zone *zone;
struct compact_control cc = {
.order = -1,
- .mode = MIGRATE_SYNC_LIGHT,
+ .mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
.ignore_skip_hint = true,
.whole_zone = true,
.gfp_mask = GFP_KERNEL,
- .proactive_compaction = true,
+ .proactive_compaction = proactive,
};
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -2818,41 +2830,16 @@ static void proactive_compact_node(pg_data_t *pgdat)
compact_zone(&cc, NULL);
- count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
- cc.total_migrate_scanned);
- count_compact_events(KCOMPACTD_FREE_SCANNED,
- cc.total_free_scanned);
- }
-}
-
-/* Compact all zones within a node */
-static void compact_node(int nid)
-{
- pg_data_t *pgdat = NODE_DATA(nid);
- int zoneid;
- struct zone *zone;
- struct compact_control cc = {
- .order = -1,
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- .whole_zone = true,
- .gfp_mask = GFP_KERNEL,
- };
-
-
- for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
- zone = &pgdat->node_zones[zoneid];
- if (!populated_zone(zone))
- continue;
-
- cc.zone = zone;
-
- compact_zone(&cc, NULL);
+ if (proactive) {
+ count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+ cc.total_migrate_scanned);
+ count_compact_events(KCOMPACTD_FREE_SCANNED,
+ cc.total_free_scanned);
+ }
}
}
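
The merge leaves one entry point with two behaviours: proactive callers (kcompactd) get MIGRATE_SYNC_LIGHT plus the KCOMPACTD_* scan-event accounting, while manual triggers keep full MIGRATE_SYNC. A toy userspace model of that split, with names simplified from the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum migrate_mode { MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* Toy model: one entry point, mode and accounting keyed off 'proactive'. */
static void compact_node(int nid, bool proactive)
{
	enum migrate_mode mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC;

	printf("node %d: mode=%s%s\n", nid,
	       mode == MIGRATE_SYNC ? "MIGRATE_SYNC" : "MIGRATE_SYNC_LIGHT",
	       proactive ? ", counting KCOMPACTD_* scan events" : "");
}

int main(void)
{
	compact_node(0, false); /* manual trigger, e.g. the compact sysfs knob */
	compact_node(0, true);  /* kcompactd proactive pass */
	return 0;
}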
-/* Compact all nodes in the system */
+/* Compact all zones of all nodes in the system */
static void compact_nodes(void)
{
int nid;
@@ -2861,7 +2848,7 @@ static void compact_nodes(void)
lru_add_drain_all();
for_each_online_node(nid)
- compact_node(nid);
+ compact_node(NODE_DATA(nid), false);
}
static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@@ -2923,7 +2910,7 @@ static ssize_t compact_store(struct device *dev,
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
- compact_node(nid);
+ compact_node(NODE_DATA(nid), false);
}
return count;
@@ -3132,7 +3119,7 @@ static int kcompactd(void *p)
unsigned int prev_score, score;
prev_score = fragmentation_score_node(pgdat);
- proactive_compact_node(pgdat);
+ compact_node(pgdat, true);
score = fragmentation_score_node(pgdat);
/*
* Defer proactive compaction if the fragmentation