author	Michal Marek <mmarek@suse.cz>	2011-06-07 15:37:51 +0200
committer	Michal Marek <mmarek@suse.cz>	2011-06-07 15:37:51 +0200
commit	2e483528cebad089d0bb3f9aebb0ada22d968ffa (patch)
tree	d701405826b271e819a9a8500838cebd37b1364a /arch/x86/mm/init.c
parent	163d3fe6a2357aba7b18b938d6ae6ce9570324e4 (diff)
parent	55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
Merge commit 'v3.0-rc1' into kbuild/kbuild
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 286d289b039b..30326443ab81 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -16,8 +16,6 @@
 #include <asm/tlb.h>
 #include <asm/proto.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 unsigned long __initdata pgt_buf_start;
 unsigned long __meminitdata pgt_buf_end;
 unsigned long __meminitdata pgt_buf_top;
@@ -81,6 +79,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
+void __init native_pagetable_reserve(u64 start, u64 end)
+{
+	memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
 struct map_range {
 	unsigned long start;
 	unsigned long end;
@@ -272,9 +275,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	__flush_tlb_all();
 
+	/*
+	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
+	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+	 * so that they can be reused for other purposes.
+	 *
+	 * On native it just means calling memblock_x86_reserve_range, on Xen it
+	 * also means marking RW the pagetable pages that we allocated before
+	 * but that haven't been used.
+	 *
+	 * In fact on xen we mark RO the whole range pgt_buf_start -
+	 * pgt_buf_top, because we have to make sure that when
+	 * init_memory_mapping reaches the pagetable pages area, it maps
+	 * RO all the pagetable pages, including the ones that are beyond
+	 * pgt_buf_end at that time.
+	 */
 	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
-		memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
-				pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+				PFN_PHYS(pgt_buf_end));
 
 	if (!after_bootmem)
 		early_memtest(start, end);
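
For context on the hook indirection introduced above, here is a minimal sketch of the plumbing as it stands around v3.0-rc1, assuming the definitions in arch/x86/include/asm/x86_init.h, arch/x86/kernel/x86_init.c, and the Xen override in arch/x86/xen/mmu.c; none of this code is part of the diff itself, and the Xen side is recalled from memory rather than shown by this commit.

/*
 * Sketch only, not part of this diff: the mapping ops struct and its
 * native default, which points at the native_pagetable_reserve()
 * helper added in the hunk above.
 */
struct x86_init_mapping {
	void (*pagetable_reserve)(u64 start, u64 end);
};

struct x86_init_ops x86_init __initdata = {
	/* ...other init ops elided... */
	.mapping = {
		.pagetable_reserve = native_pagetable_reserve,
	},
};

/*
 * Xen's override (recalled from arch/x86/xen/mmu.c): reserve the used
 * range exactly like native, then flip the unused tail (pgt_buf_end -
 * pgt_buf_top) back to RW, since Xen marked the whole buffer RO up
 * front, as the comment in the last hunk explains.
 */
static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
	/* reserve the range actually consumed for page tables */
	native_pagetable_reserve(start, end);

	/* mark the untouched tail RW again so it can be reused */
	while (end < PFN_PHYS(pgt_buf_top)) {
		make_lowmem_page_readwrite(__va(end));
		end += PAGE_SIZE;
	}
}

The indirection keeps init_memory_mapping() hypervisor-agnostic: bare metal pays only an indirect call for the common reserve step, while Xen gets the one extra pass its read-only pagetable invariant requires.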