author	Ingo Molnar <mingo@elte.hu>	2008-08-11 11:19:20 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 11:19:20 +0200
commit	8067794bec1cc5de1431102cf0a6a1c7ce75cd85 (patch)
tree	2bfa0fba060d253bbd972282b29a3d60c3e7cb7f /mm/hugetlb.c
parent	7ab6af7ab69df8c9c5fbc380004fb81187742096 (diff)
parent	796aadeb1b2db9b5d463946766c5bbfd7717158c (diff)
Merge branch 'linus' into x86/x2apic

Conflicts:
	arch/x86/kernel/genapic_64.c

Manual merge:
	arch/x86/kernel/genx2apic_uv_x.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 41341c414194..757ca983fd99 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
@@ -19,6 +20,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
 
@@ -1026,7 +1028,6 @@ static void __init report_hugepages(void)
 	}
 }
 
-#ifdef CONFIG_SYSCTL
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(struct hstate *h, unsigned long count)
 {
@@ -1282,7 +1283,12 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-	BUILD_BUG_ON(HPAGE_SHIFT == 0);
+	/* Some platform decide whether they support huge pages at boot
+	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+	 * there is no such support
+	 */
+	if (HPAGE_SHIFT == 0)
+		return 0;
 
 	if (!size_to_hstate(default_hstate_size)) {
 		default_hstate_size = HPAGE_SIZE;
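
The hunk above trades the build-time assertion for a runtime check: platforms such as powerpc only learn at boot whether huge pages are supported, so HPAGE_SHIFT may legitimately be 0 and hugetlb_init() just has to bail out quietly. Below is a minimal userspace sketch of that pattern; huge_shift and hugetlb_like_init() are illustrative stand-ins, not kernel symbols.

#include <stdio.h>

static unsigned long huge_shift;	/* 0 means "no huge page support on this platform" */

static int hugetlb_like_init(void)
{
	if (huge_shift == 0)		/* runtime test replaces BUILD_BUG_ON() */
		return 0;		/* nothing to set up, but not an error */

	printf("initializing pools for %lu-byte pages\n", 1UL << huge_shift);
	return 0;
}

int main(void)
{
	huge_shift = 0;			/* e.g. a kernel that booted without hugepage support */
	hugetlb_like_init();

	huge_shift = 21;		/* 2 MiB huge pages */
	hugetlb_like_init();
	return 0;
}

The first call returns immediately, mirroring how hugetlb_init() now skips pool setup instead of tripping an assertion when the platform reported no huge page support.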
@@ -1386,6 +1392,7 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
 	return nr;
 }
 
+#ifdef CONFIG_SYSCTL
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
 			   struct file *file, void __user *buffer,
 			   size_t *length, loff_t *ppos)
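
Together with the removal near try_to_free_low() above, this hunk narrows the CONFIG_SYSCTL guard: the pool-management helpers in between are now built unconditionally and remain available to callers outside the sysctl path, while only the entry point hugetlb_sysctl_handler() stays conditional. A minimal sketch of that split, using hypothetical names (shrink_pool(), pool_sysctl_handler()) rather than the kernel's:

#include <stdio.h>

static void shrink_pool(unsigned long count)		/* always built */
{
	printf("shrinking pool to %lu huge pages\n", count);
}

#ifdef CONFIG_SYSCTL
static int pool_sysctl_handler(unsigned long count)	/* built only with sysctl support */
{
	shrink_pool(count);
	return 0;
}
#endif

int main(void)
{
#ifdef CONFIG_SYSCTL
	pool_sysctl_handler(8);
#else
	shrink_pool(8);					/* non-sysctl callers still reach the helper */
#endif
	return 0;
}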
@@ -1672,6 +1679,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(start & ~huge_page_mask(h));
 	BUG_ON(end & ~huge_page_mask(h));
 
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
@@ -1713,6 +1721,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	}
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		list_del(&page->lru);
 		put_page(page);
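
The last two hunks bracket the huge page unmap with mmu_notifier_invalidate_range_start()/mmu_notifier_invalidate_range_end(), telling secondary MMUs (KVM shadow page tables and other mmu_notifier users) to drop their mappings before the huge PTEs are cleared and releasing them again only after flush_tlb_range(). A hedged userspace analogue of that bracketing; struct range_notifier and unmap_huge_range() are illustrative, not the kernel's API:

#include <stdio.h>

struct range_notifier {
	void (*invalidate_start)(unsigned long start, unsigned long end);
	void (*invalidate_end)(unsigned long start, unsigned long end);
};

static void log_start(unsigned long start, unsigned long end)
{
	printf("secondary MMU: drop mappings in [%#lx, %#lx)\n", start, end);
}

static void log_end(unsigned long start, unsigned long end)
{
	printf("secondary MMU: range [%#lx, %#lx) is consistent again\n", start, end);
}

static void unmap_huge_range(struct range_notifier *n,
			     unsigned long start, unsigned long end)
{
	n->invalidate_start(start, end);	/* mirrors mmu_notifier_invalidate_range_start() */
	/* ... clear the huge PTEs and flush the TLB here ... */
	n->invalidate_end(start, end);		/* mirrors mmu_notifier_invalidate_range_end() */
}

int main(void)
{
	struct range_notifier notifier = { log_start, log_end };

	unmap_huge_range(&notifier, 0x200000, 0x400000);	/* one 2 MiB huge page */
	return 0;
}

In the hunks above the start call sits before spin_lock(&mm->page_table_lock) and the end call after flush_tlb_range(), so the notifier callbacks, which are allowed to block, never run under the spinlock.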