author    | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2012-08-13 15:39:10 -0400
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2012-08-13 15:39:10 -0400
commit    | 0980bd9cd32de2fef7eaa2858345c49d14498625 (patch)
tree      | 41f5f823d0569a81b22037e79c22d823933a63f1 /include/linux/hugetlb.h
parent    | 78821b2c0299ab807d483802f09897728b93bce0 (diff)
parent    | 0d7614f09c1ebdbaa1599a5aba7593f147bf96ee (diff)
Merge commit 'v3.6-rc1' into linux-next
* commit 'v3.6-rc1': (9532 commits)
Linux 3.6-rc1
mm: remove node_start_pfn checking in new WARN_ON for now
ARM: mmp: add missing irqs.h
arm: mvebu: fix typo in .dtsi comment for Armada XP SoCs
ARM: PRIMA2: delete redundant codes to restore LATCHED when timer resumes
libceph: fix crypto key null deref, memory leak
ceph: simplify+fix atomic_open
sh: explicitly include sh_dma.h in setup-sh7722.c
um: Add arch/x86/um to MAINTAINERS
um: pass siginfo to guest process
um: fix ubd_file_size for read-only files
md/dm-raid: DM_RAID should select MD_RAID10
md/raid1: submit IO from originating thread instead of md thread.
raid5: raid5d handle stripe in batch way
raid5: make_request use batch stripe release
um: pull interrupt_end() into userspace()
um: split syscall_trace(), pass pt_regs to it
um: switch UPT_SET_RETURN_VALUE and regs_return_value to pt_regs
MIPS: Loongson 2: Sort out clock managment.
locks: remove unused lm_release_private
...
Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r-- | include/linux/hugetlb.h | 50
1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5d6bbe2259e..225164842ab6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -4,9 +4,11 @@
 #include <linux/mm_types.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
+#include <linux/cgroup.h>
 
 struct ctl_table;
 struct user_struct;
+struct mmu_gather;
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -20,6 +22,11 @@ struct hugepage_subpool {
 	long max_hpages, used_hpages;
 };
 
+extern spinlock_t hugetlb_lock;
+extern int hugetlb_max_hstate __read_mostly;
+#define for_each_hstate(h) \
+	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
+
 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
@@ -40,9 +47,14 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			struct page **, struct vm_area_struct **,
 			unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
-			unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range(struct vm_area_struct *,
-			unsigned long, unsigned long, struct page *);
+			  unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(int, char *);
@@ -98,7 +110,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
-#define unmap_hugepage_range(vma, start, end, page)	BUG()
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
@@ -112,13 +123,31 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
-#define dequeue_hwpoisoned_huge_page(page)	0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+	return 0;
+}
+
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #define HUGETLB_ANON_FILE "anon_hugepage"
@@ -199,10 +228,15 @@ struct hstate {
 	unsigned long resv_huge_pages;
 	unsigned long surplus_huge_pages;
 	unsigned long nr_overcommit_huge_pages;
+	struct list_head hugepage_activelist;
 	struct list_head hugepage_freelists[MAX_NUMNODES];
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_CGROUP_HUGETLB
+	/* cgroup control files */
+	struct cftype cgroup_files[5];
+#endif
 	char name[HSTATE_NAME_LEN];
 };
 
@@ -302,6 +336,11 @@ static inline unsigned hstate_index_to_shift(unsigned index)
 	return hstates[index].order + PAGE_SHIFT;
}
 
+static inline int hstate_index(struct hstate *h)
+{
+	return h - hstates;
+}
+
 #else
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -320,6 +359,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 	return 1;
 }
 #define hstate_index_to_shift(index) 0
+#define hstate_index(h) 0
 #endif
 
 #endif /* _LINUX_HUGETLB_H */
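
The for_each_hstate() macro and hstate_index() helper exported by this diff follow a common pattern: walk a static hstates[] array up to a live count, and recover an array index by pointer subtraction. The following user-space sketch only models that pattern; it is not kernel code, and the struct fields, page orders, and HUGE_MAX_HSTATE bound are invented for illustration. The macro body and the pointer arithmetic are the same as in the diff above.

/*
 * Standalone illustration of the for_each_hstate()/hstate_index() idiom.
 * Everything except the macro and the index arithmetic is made up here.
 */
#include <stdio.h>

#define HSTATE_NAME_LEN 32
#define HUGE_MAX_HSTATE 2          /* illustrative capacity, not the kernel value */

struct hstate {
	unsigned int order;            /* huge page size == PAGE_SIZE << order */
	unsigned long nr_huge_pages;
	char name[HSTATE_NAME_LEN];
};

/* A fixed array plus a count of populated entries, as the diff assumes. */
static struct hstate hstates[HUGE_MAX_HSTATE] = {
	{ .order = 9,  .nr_huge_pages = 4, .name = "hugepages-2048kB" },
	{ .order = 18, .nr_huge_pages = 1, .name = "hugepages-1048576kB" },
};
static int hugetlb_max_hstate = 2;

/* Same iteration macro as added to hugetlb.h. */
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

/* Same pointer arithmetic as the new hstate_index() helper. */
static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

int main(void)
{
	struct hstate *h;

	for_each_hstate(h)
		printf("hstate %d: %-22s order=%u pages=%lu\n",
		       hstate_index(h), h->name, h->order, h->nr_huge_pages);
	return 0;
}

In the kernel the index returned by hstate_index() is what lets the new per-hstate cgroup_files[] and hugepage_activelist state be associated with a particular huge page size; the sketch only demonstrates the array-walk and index-recovery mechanics.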