author    Ingo Molnar <mingo@elte.hu>    2008-06-10 16:24:03 +0200
committer Ingo Molnar <mingo@elte.hu>    2008-06-10 16:24:03 +0200
commit    5c82b12f39db91f2e20d5bdd28c0a528add9c785 (patch)
tree      18df857dcf948969f23ce3708ac8b7ab5722d4d3 /mm
parent    d143282f685a850b91f07704824bb5b005ddd3af (diff)
parent    23ee1c755e51c788ca8d3f9d9fa1072a07426820 (diff)
Merge branch 'x86/mpparse' into auto-x86-next
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/setup.c
	arch/x86/kernel/setup_64.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  49
1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e83f02cd2d3..c7f1ff133371 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3490,6 +3490,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		nid, (unsigned long)pgdat,
+		(unsigned long)pgdat->node_mem_map);
+#endif
 
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3578,25 +3583,49 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 /**
  * shrink_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
  * @new_end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init shrink_active_range(unsigned int nid, unsigned long new_end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= new_end_pfn) {
+			/* clear it */
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
+		}
+		if (early_node_map[i].end_pfn > new_end_pfn) {
 			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+			continue;
 		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**
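To see what the rewritten shrink_active_range() does to a node's registered ranges, here is a small self-contained userspace model. Names, array sizes, and sample PFNs are invented for illustration; it only mirrors the clamp-then-compact control flow of the hunk above, not the kernel's real early_node_map handling:

/* Standalone sketch, not kernel code: models the new clamp-then-compact
 * behaviour on a toy array standing in for early_node_map. */
#include <stdio.h>
#include <string.h>

struct toy_region {			/* simplified stand-in for an early_node_map[] entry */
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static struct toy_region toy_map[8] = {	/* hypothetical sample ranges for node 0 */
	{ 0x000, 0x100, 0 },
	{ 0x100, 0x200, 0 },
	{ 0x200, 0x300, 0 },
};
static int toy_nr_entries = 3;

static void toy_shrink_active_range(int nid, unsigned long new_end_pfn)
{
	int i, j, removed = 0;

	/* Pass 1: clear ranges wholly at or above the new end, clamp a straddler. */
	for (i = 0; i < toy_nr_entries; i++) {
		if (toy_map[i].nid != nid)
			continue;
		if (toy_map[i].start_pfn >= new_end_pfn) {
			toy_map[i].end_pfn = 0;		/* mark as blank */
			removed = 1;
		} else if (toy_map[i].end_pfn > new_end_pfn) {
			toy_map[i].end_pfn = new_end_pfn;
		}
	}

	if (!removed)
		return;

	/* Pass 2: shuffle the blank entries out, as the kernel loop does. */
	for (i = toy_nr_entries - 1; i > 0; i--) {
		if (toy_map[i].nid != nid || toy_map[i].end_pfn)
			continue;
		for (j = i; j < toy_nr_entries - 1; j++)
			toy_map[j] = toy_map[j + 1];
		memset(&toy_map[toy_nr_entries - 1], 0, sizeof(toy_map[0]));
		toy_nr_entries--;
	}
}

int main(void)
{
	int i;

	/* Shrink node 0 so its memory now ends at PFN 0x180: the middle range
	 * is clamped to 0x100-0x180 and the 0x200-0x300 range is dropped. */
	toy_shrink_active_range(0, 0x180);

	for (i = 0; i < toy_nr_entries; i++)
		printf("range %d: %#lx-%#lx nid %d\n", i,
		       toy_map[i].start_pfn, toy_map[i].end_pfn, toy_map[i].nid);
	return 0;
}

With the old interface only the single range whose end matched old_end_pfn was adjusted, so any further ranges lying above the new boundary survived; checking every range of the node and compacting out the emptied entries is the behavioural change this hunk makes.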