author     Michal Hocko <mhocko@suse.com>  2017-09-06 16:20:17 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 17:27:25 -0700
commit     afb6ebb3faeb382f7c8b4478f2a84cee37bb8610 (patch)
tree       f37b6eb6cab9a0957b682d21a22fc46818c90e93 /mm
parent     c9bff3eebc09be23fbc868f5e6731666d23cbea3 (diff)
mm, page_alloc: remove boot pageset initialization from memory hotplug
boot_pageset is a boot time hack which gets superseded by normal pagesets
later in the boot process.  It makes zero sense to reinitialize it again and
again during memory hotplug.

Link: http://lkml.kernel.org/r/20170721143915.14161-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
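In simplified form, the patch moves the boot_pageset initialization loop out of
__build_all_zonelists() (which runs again on every memory hotplug event) and into
build_all_zonelists_init() (which runs exactly once at boot).  The sketch below is a
non-compilable outline of that structure, with surrounding code elided; the diff that
follows is the authoritative change.

/*
 * Simplified sketch of the call structure after this patch.  Details,
 * locking and the NUMA handling are elided; see the diff below.
 */

/* Called at boot and again on every memory hotplug event. */
static int __build_all_zonelists(void *data)
{
	/* Rebuild zonelists only; boot_pageset is no longer touched here. */
	return 0;
}

/* Called exactly once, early during boot. */
static noinline void __init build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/* boot_pageset setup now happens only here, once per boot. */
	for_each_possible_cpu(cpu)
		setup_pageset(&per_cpu(boot_pageset, cpu), 0);

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}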
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b23df1be909..94e64784a8be 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5141,7 +5141,7 @@ DEFINE_MUTEX(zonelists_mutex);
static int __build_all_zonelists(void *data)
{
int nid;
- int cpu;
+ int __maybe_unused cpu;
pg_data_t *self = data;
#ifdef CONFIG_NUMA
@@ -5162,23 +5162,8 @@ static int __build_all_zonelists(void *data)
}
}
- /*
- * Initialize the boot_pagesets that are going to be used
- * for bootstrapping processors. The real pagesets for
- * each zone will be allocated later when the per cpu
- * allocator is available.
- *
- * boot_pagesets are used also for bootstrapping offline
- * cpus if the system is already booted because the pagesets
- * are needed to initialize allocators on a specific cpu too.
- * F.e. the percpu allocator needs the page allocator which
- * needs the percpu allocator in order to allocate its pagesets
- * (a chicken-egg dilemma).
- */
- for_each_possible_cpu(cpu) {
- setup_pageset(&per_cpu(boot_pageset, cpu), 0);
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+ for_each_possible_cpu(cpu) {
/*
* We now know the "local memory node" for each node--
* i.e., the node of the first zone in the generic zonelist.
@@ -5189,8 +5174,8 @@ static int __build_all_zonelists(void *data)
*/
if (cpu_online(cpu))
set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
-#endif
}
+#endif
return 0;
}
@@ -5198,7 +5183,26 @@ static int __build_all_zonelists(void *data)
static noinline void __init
build_all_zonelists_init(void)
{
+ int cpu;
+
__build_all_zonelists(NULL);
+
+ /*
+ * Initialize the boot_pagesets that are going to be used
+ * for bootstrapping processors. The real pagesets for
+ * each zone will be allocated later when the per cpu
+ * allocator is available.
+ *
+ * boot_pagesets are used also for bootstrapping offline
+ * cpus if the system is already booted because the pagesets
+ * are needed to initialize allocators on a specific cpu too.
+ * F.e. the percpu allocator needs the page allocator which
+ * needs the percpu allocator in order to allocate its pagesets
+ * (a chicken-egg dilemma).
+ */
+ for_each_possible_cpu(cpu)
+ setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
mminit_verify_zonelist();
cpuset_init_current_mems_allowed();
}