author		Daniel Jordan <daniel.m.jordan@oracle.com>	2019-04-26 10:26:37 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2019-05-15 11:54:00 +1000
commit		c301916fbcd69a0172316d904797fc82d8ba19c9 (patch)
tree		d6b6183cf20582415ff3daca21e29be9088eaf68 /arch
parent		631575b02a9838ef8407389291ab8afe087fd7c8 (diff)
mm: change locked_vm's type from unsigned long to atomic64_t
Patch series "convert locked_vm from unsigned long to atomic64_t"

Taking and dropping mmap_sem to modify a single counter, locked_vm, is overkill when the counter could be synchronized separately.

Make mmap_sem a little less coarse by changing locked_vm to an atomic, the 64-bit variety to avoid issues with overflow on 32-bit systems. If user-controlled values are used to increase locked_vm, multiple threads doing it at once on a 32-bit system could theoretically cause overflow, so in the absence of atomic overflow checking, the 64-bit counter on 32b is defensive programming. I wouldn't have thought to do it, but Jason Gunthorpe raised the same issue in the pinned_vm series: https://lore.kernel.org/linux-mm/20190115205311.GD22031@mellanox.com/

This is a more conservative alternative to [1] with no user-visible effects. Thanks to Alexey Kardashevskiy for pointing out the racy atomics and to Alex Williamson, Christoph Lameter, Ira Weiny, and Jason Gunthorpe for their comments on [1].

Davidlohr Bueso recently did a similar conversion for pinned_vm [2].

Testing:
 1. passes LTP mlock[all], munlock[all], fork, mmap, and mremap tests in an x86 kvm guest
 2. a VFIO-enabled x86 kvm guest shows the same VmLck in /proc/pid/status before and after this change
 3. cross-compiles on powerpc

[1] https://lore.kernel.org/linux-mm/20190211224437.25267-1-daniel.m.jordan@oracle.com/
[2] https://lore.kernel.org/linux-mm/20190206175920.31082-1-dave@stgolabs.net/

This patch (of 6):

Taking and dropping mmap_sem to modify a single counter, locked_vm, is overkill when the counter could be synchronized separately.

Make mmap_sem a little less coarse by changing locked_vm to an atomic, the 64-bit variety to avoid issues with overflow on 32-bit systems.

Link: http://lkml.kernel.org/r/20190402204158.27582-2-daniel.m.jordan@oracle.com
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Alan Tull <atull@kernel.org>
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Moritz Fischer <mdf@kernel.org>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Wu Hao <hao.wu@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
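The mm_types.h hunk that actually changes the field's type is outside this arch-limited view. As a minimal sketch of the pattern the series converts callers to (the include list is abbreviated, and example_locked_vm_charge() is a hypothetical name used purely for illustration, not part of the patch):

	/* Illustrative kernel-style sketch, not part of this diff. */
	#include <linux/atomic.h>
	#include <linux/mm_types.h>
	#include <linux/sched/signal.h>	/* rlimit() */
	#include <linux/capability.h>	/* capable() */

	/*
	 * In mainline, locked_vm lives in struct mm_struct
	 * (include/linux/mm_types.h); the series turns
	 *	unsigned long locked_vm;
	 * into
	 *	atomic64_t locked_vm;
	 * Readers then use atomic64_read(), and adjustments use
	 * atomic64_add()/atomic64_sub(), so touching the counter no longer
	 * requires mmap_sem by itself.
	 */
	static void example_locked_vm_charge(struct mm_struct *mm,
					     unsigned long npages)
	{
		/* Snapshot for the limit check; the add is what must be atomic. */
		s64 locked_vm = atomic64_read(&mm->locked_vm);
		unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

		if (locked_vm + npages <= lock_limit || capable(CAP_IPC_LOCK))
			atomic64_add(npages, &mm->locked_vm);
	}

Note that in both hunks below the down_write()/up_write() pair around the check is retained, so this patch changes no behaviour on its own; the atomic type is what lets later changes stop taking mmap_sem just for the counter.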
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kvm/book3s_64_vio.c	14
-rw-r--r--	arch/powerpc/mm/book3s64/iommu_api.c	15
2 files changed, 16 insertions, 13 deletions
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index f100e331e69b..dd72b9d962f7 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -59,32 +59,34 @@ static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
long ret = 0;
+ s64 locked_vm;
if (!current || !current->mm)
return ret; /* process exited */
down_write(&current->mm->mmap_sem);
+ locked_vm = atomic64_read(&current->mm->locked_vm);
if (inc) {
unsigned long locked, lock_limit;
- locked = current->mm->locked_vm + stt_pages;
+ locked = locked_vm + stt_pages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- current->mm->locked_vm += stt_pages;
+ atomic64_add(stt_pages, &current->mm->locked_vm);
} else {
- if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
- stt_pages = current->mm->locked_vm;
+ if (WARN_ON_ONCE(stt_pages > locked_vm))
+ stt_pages = locked_vm;
- current->mm->locked_vm -= stt_pages;
+ atomic64_sub(stt_pages, &current->mm->locked_vm);
}
pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
inc ? '+' : '-',
stt_pages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ atomic64_read(&current->mm->locked_vm) << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 5c521f3924a5..bdd3fbab7818 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -55,30 +55,31 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
unsigned long npages, bool incr)
{
long ret = 0, locked, lock_limit;
+ s64 locked_vm;
if (!npages)
return 0;
down_write(&mm->mmap_sem);
-
+ locked_vm = atomic64_read(&mm->locked_vm);
if (incr) {
- locked = mm->locked_vm + npages;
+ locked = locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- mm->locked_vm += npages;
+ atomic64_add(npages, &mm->locked_vm);
} else {
- if (WARN_ON_ONCE(npages > mm->locked_vm))
- npages = mm->locked_vm;
- mm->locked_vm -= npages;
+ if (WARN_ON_ONCE(npages > locked_vm))
+ npages = locked_vm;
+ atomic64_sub(npages, &mm->locked_vm);
}
pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
current ? current->pid : 0,
incr ? '+' : '-',
npages << PAGE_SHIFT,
- mm->locked_vm << PAGE_SHIFT,
+ atomic64_read(&mm->locked_vm) << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
up_write(&mm->mmap_sem);