 Documentation/mm/transhuge.rst | 6 +++---
 include/linux/huge_mm.h        | 4 ++--
 mm/huge_memory.c               | 3 +--
 mm/rmap.c                      | 2 +-
 4 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 03bbd0a19041..a9608fe51649 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -153,8 +153,8 @@ clear where references should go after split: it will stay on the head page.
 Note that split_huge_pmd() doesn't have any limitations on refcounting:
 pmd can be split at any point and never fails.
 
-Partial unmap and deferred_split_huge_page()
-============================================
+Partial unmap and deferred_split_folio()
+========================================
 
 Unmapping part of THP (with munmap() or other way) is not going to free
 memory immediately. Instead, we detect that a subpage of THP is not in use
@@ -166,6 +166,6 @@ the place where we can detect partial unmap. It also might be
 counterproductive since in many cases partial unmap happens during exit(2) if
 a THP crosses a VMA boundary.
 
-The function deferred_split_huge_page() is used to queue a page for splitting.
+The function deferred_split_folio() is used to queue a folio for splitting.
 The splitting itself will happen when we get memory pressure via shrinker
 interface.
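
As a reading aid (not part of the patch), the flow the documentation describes
boils down to the sketch below; the wrapper function is hypothetical, while
folio_test_pmd_mappable(), folio_test_anon() and the new deferred_split_folio()
are the real kernel helpers used at the mm/rmap.c call site further down:

/*
 * Sketch only: queue a partially unmapped anonymous THP for a later
 * split. The actual split happens asynchronously, when the shrinker
 * runs under memory pressure.
 */
static void on_partial_unmap(struct folio *folio)
{
	if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
		deferred_split_folio(folio);
}
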
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b9978978a160..70bd867eba94 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -187,7 +187,7 @@ static inline int split_huge_page(struct page *page)
 {
 	return split_huge_page_to_list(page, NULL);
 }
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long address, bool freeze, struct folio *folio);
@@ -340,7 +340,7 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_folio(struct folio *folio) {}
 
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
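
The empty inline stub above exists so that call sites need no #ifdef. A
hypothetical caller (illustration only, assuming folio_test_large() from
page-flags.h) compiles unchanged with CONFIG_TRANSPARENT_HUGEPAGE=n, where the
call collapses to nothing:

/* Illustration only: no #ifdef needed around the call. */
static void queue_split_if_large(struct folio *folio)
{
	if (folio_test_large(folio))
		deferred_split_folio(folio);
}
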
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c23b0e01734b..868fcccdff72 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2815,9 +2815,8 @@ void free_transhuge_page(struct page *page)
 	free_compound_page(page);
 }
 
-void deferred_split_huge_page(struct page *page)
+void deferred_split_folio(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg = folio_memcg(folio);
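
Note how the conversion moves the page-to-folio translation out of the
function: the page_folio() call deleted above becomes the caller's job. A
caller still holding a struct page could adapt with a shim like this
(hypothetical, not part of the patch):

/* Hypothetical shim: recover the folio, then queue it as before. */
static inline void deferred_split_huge_page(struct page *page)
{
	deferred_split_folio(page_folio(page));
}
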
diff --git a/mm/rmap.c b/mm/rmap.c
index 0020474f46c1..a079d9964b9c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1427,7 +1427,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	 */
 	if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
 		if (!compound || nr < nr_pmdmapped)
-			deferred_split_huge_page(&folio->page);
+			deferred_split_folio(folio);
 }
 
 /*
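
For clarity, the guard around the converted call can be read as the following
hypothetical predicate (a restatement, not code from the patch): the folio is
queued when it is a PMD-mappable anonymous folio and the unmap was partial,
i.e. either PTE mappings were removed (!compound) or dropping the PMD mapping
left PTE-mapped subpages behind (nr < nr_pmdmapped):

/* Hypothetical restatement of the condition above. */
static bool should_defer_split(struct folio *folio, bool compound,
			       int nr, int nr_pmdmapped)
{
	if (!folio_test_pmd_mappable(folio) || !folio_test_anon(folio))
		return false;
	return !compound || nr < nr_pmdmapped;
}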