author    Bang Li <libang.li@antgroup.com>            2024-07-05 11:23:09 +0800
committer Andrew Morton <akpm@linux-foundation.org>   2024-07-12 15:52:21 -0700
commit    26c7d8413aaf113a54b54f63e151416a5c5c2a88
tree      a4af807ebbf178c37c6f4154c39c75106368b4e0
parent    4c8763e84aae4d04d94b35aca9f7db6a8930ad77
mm: thp: support "THPeligible" semantics for mTHP with anonymous shmem
After commit 7fb1b252afb5 ("mm: shmem: add mTHP support for anonymous shmem"), different policies can be configured for anonymous shmem through the multi-size THP sysfs interface. But "THPeligible" currently indicates only whether the mapping is eligible for allocating THP pages and whether the THP is PMD-mappable for anonymous shmem; we need to support semantics for mTHP with anonymous shmem similar to those for mTHP with anonymous memory.

Link: https://lkml.kernel.org/r/20240705032309.24933-1-libang.li@antgroup.com
Signed-off-by: Bang Li <libang.li@antgroup.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 include/linux/shmem_fs.h |  9 +++++++++
 mm/huge_memory.c         | 13 +++++++++----
 mm/shmem.c               |  9 +--------
 3 files changed, 19 insertions(+), 12 deletions(-)
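As a quick way to observe the "THPeligible" semantics the log refers to, here is a minimal userspace sketch (not part of this patch): it creates an anonymous shmem mapping with MAP_SHARED | MAP_ANONYMOUS and prints the per-VMA "THPeligible" line from /proc/self/smaps. The 2 MiB length and the ad-hoc smaps parsing are illustrative assumptions; only /proc/<pid>/smaps and its THPeligible field come from the kernel itself.

/*
 * Illustrative only: map shared anonymous (shmem-backed) memory and
 * report the THPeligible field that the kernel exposes for that VMA
 * in /proc/self/smaps.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;	/* assumed 2 MiB, enough for a PMD-sized THP */
	unsigned long start, end, target;
	char line[256];
	int in_vma = 0;
	FILE *f;

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	target = (unsigned long)p;

	f = fopen("/proc/self/smaps", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* VMA header lines look like "start-end perms ..." */
		if (sscanf(line, "%lx-%lx ", &start, &end) == 2) {
			in_vma = (target >= start && target < end);
			continue;
		}
		if (in_vma && !strncmp(line, "THPeligible:", 12)) {
			fputs(line, stdout);
			break;
		}
	}

	fclose(f);
	munmap(p, len);
	return 0;
}

With this patch applied, the value printed for such a VMA should reflect whether any allowed hugepage order (per the mTHP sysfs policies) applies to the mapping, rather than only the PMD-sized decision from shmem_is_huge().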
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 3fb18f7eb73e..1d06b1e5408a 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -113,12 +113,21 @@ int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
struct mm_struct *mm, unsigned long vm_flags);
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ bool global_huge);
#else
static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
struct mm_struct *mm, unsigned long vm_flags)
{
return false;
}
+static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ bool global_huge)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_SHMEM
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f8b5cbd4dd71..9ec64aa2be94 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -151,10 +151,15 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
* Must be done before hugepage flags check since shmem has its
* own flags.
*/
- if (!in_pf && shmem_file(vma->vm_file))
- return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
- !enforce_sysfs, vma->vm_mm, vm_flags)
- ? orders : 0;
+ if (!in_pf && shmem_file(vma->vm_file)) {
+ bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
+ !enforce_sysfs, vma->vm_mm, vm_flags);
+
+ if (!vma_is_anon_shmem(vma))
+ return global_huge ? orders : 0;
+ return shmem_allowable_huge_orders(file_inode(vma->vm_file),
+ vma, vma->vm_pgoff, global_huge);
+ }
if (!vma_is_anonymous(vma)) {
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index f24177e9d5cc..921d59c3d669 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1622,7 +1622,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static unsigned long shmem_allowable_huge_orders(struct inode *inode,
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
bool global_huge)
{
@@ -1707,13 +1707,6 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
return orders;
}
#else
-static unsigned long shmem_allowable_huge_orders(struct inode *inode,
- struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
-{
- return 0;
-}
-
static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
struct address_space *mapping, pgoff_t index,
unsigned long orders)