Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	34
1 file changed, 31 insertions, 3 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index def84d8bcf2d..ea2ed8e301ef 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1925,6 +1925,7 @@ void free_huge_folio(struct folio *folio)
 				      pages_per_huge_page(h), folio);
 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 					   pages_per_huge_page(h), folio);
+	lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
 	mem_cgroup_uncharge(folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
@@ -2390,6 +2391,23 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 	return folio;
 }
 
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+		nodemask_t *nmask, gfp_t gfp_mask)
+{
+	struct folio *folio;
+
+	spin_lock_irq(&hugetlb_lock);
+	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
+					       nmask);
+	if (folio) {
+		VM_BUG_ON(!h->resv_huge_pages);
+		h->resv_huge_pages--;
+	}
+
+	spin_unlock_irq(&hugetlb_lock);
+	return folio;
+}
+
 /* folio migration callback function */
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
@@ -3076,6 +3094,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 
 	if (!memcg_charge_ret)
 		mem_cgroup_commit_charge(folio, memcg);
+	lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
 	mem_cgroup_put(memcg);
 
 	return folio;
@@ -3284,6 +3303,7 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 {
 	unsigned long i;
 	char buf[32];
+	LIST_HEAD(folio_list);
 
 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
 		if (hstate_is_gigantic(h)) {
@@ -3293,14 +3313,18 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 			struct folio *folio;
 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
-			folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
-					&node_states[N_MEMORY]);
+			folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
+					&node_states[N_MEMORY], NULL);
 			if (!folio)
 				break;
-			free_huge_folio(folio); /* free it into the hugepage allocator */
+			list_add(&folio->lru, &folio_list);
 		}
 		cond_resched();
 	}
+
+	if (!list_empty(&folio_list))
+		prep_and_add_allocated_folios(h, &folio_list);
+
 	if (i == h->max_huge_pages_node[nid])
 		return;
@@ -6331,6 +6355,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_HWPOISON_LARGE |
 			      VM_FAULT_SET_HINDEX(hstate_index(h));
 			goto out_mutex;
+		} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
+			/* This isn't supported in hugetlb. */
+			ret = VM_FAULT_SIGSEGV;
+			goto out_mutex;
 		}
 	}